Diffstat (limited to 'test')
-rw-r--r--  test/Analysis/Dominators/2006-10-02-BreakCritEdges.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/limit-depth.ll | 58
-rw-r--r--  test/Bitcode/thinlto-alias.ll | 2
-rw-r--r--  test/Bitcode/thinlto-function-summary-callgraph-pgo.ll | 2
-rw-r--r--  test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll | 2
-rw-r--r--  test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll | 2
-rw-r--r--  test/Bitcode/thinlto-function-summary-callgraph.ll | 2
-rw-r--r--  test/Bitcode/thinlto-function-summary-refgraph.ll | 2
-rw-r--r--  test/Bitcode/thinlto-function-summary.ll | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll | 19
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll | 65
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/call-translator.ll | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-combines.mir | 8
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll | 7
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir | 85
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-undef.mir | 15
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-trunc.mir | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-ccmp.ll | 6
-rw-r--r--  test/CodeGen/AArch64/arm64-spill-remarks.ll | 27
-rw-r--r--  test/CodeGen/AArch64/ccmp-successor-probs.mir | 46
-rw-r--r--  test/CodeGen/AArch64/cond-br-tuning.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/alignbit-pat.ll | 100
-rw-r--r--  test/CodeGen/AMDGPU/bug-vopc-commute.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/cgp-bitfield-extract.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/combine-and-sext-bool.ll | 27
-rw-r--r--  test/CodeGen/AMDGPU/combine-cond-add-sub.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll | 37
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.load.dword.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/misched-killflags.mir | 45
-rw-r--r--  test/CodeGen/AMDGPU/mubuf.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir | 69
-rw-r--r--  test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir | 155
-rw-r--r--  test/CodeGen/AMDGPU/ret_jump.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/scheduler-subrange-crash.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/sdwa-peephole-instr.mir | 446
-rw-r--r--  test/CodeGen/AMDGPU/select-vectors.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/setcc-sext.ll | 292
-rw-r--r--  test/CodeGen/AMDGPU/sgpr-copy.ll | 94
-rw-r--r--  test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/shift-i64-opts.ll | 74
-rw-r--r--  test/CodeGen/AMDGPU/si-lod-bias.ll | 17
-rw-r--r--  test/CodeGen/AMDGPU/si-sgpr-spill.ll | 398
-rw-r--r--  test/CodeGen/AMDGPU/si-spill-cf.ll | 136
-rw-r--r--  test/CodeGen/AMDGPU/smrd.ll | 48
-rw-r--r--  test/CodeGen/AMDGPU/spill-to-smem-m0.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/split-smrd.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll | 20
-rw-r--r--  test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll | 2
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir | 73
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll | 10
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-isel.ll | 20
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalizer.mir | 55
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir | 31
-rw-r--r--  test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll | 2
-rw-r--r--  test/CodeGen/ARM/Windows/no-arm-mode.ll | 10
-rw-r--r--  test/CodeGen/ARM/Windows/tls.ll | 14
-rw-r--r--  test/CodeGen/ARM/alloca.ll | 4
-rw-r--r--  test/CodeGen/ARM/arg-copy-elide.ll | 4
-rw-r--r--  test/CodeGen/ARM/arm-abi-attr.ll | 2
-rw-r--r--  test/CodeGen/ARM/arm-and-tst-peephole.ll | 2
-rw-r--r--  test/CodeGen/ARM/arm-position-independence-jump-table.ll | 2
-rw-r--r--  test/CodeGen/ARM/arm-shrink-wrapping-linux.ll | 10
-rw-r--r--  test/CodeGen/ARM/atomic-cmpxchg.ll | 4
-rw-r--r--  test/CodeGen/ARM/bool-ext-inc.ll | 28
-rw-r--r--  test/CodeGen/ARM/cmpxchg-O0-be.ll | 26
-rw-r--r--  test/CodeGen/ARM/cmpxchg-weak.ll | 4
-rw-r--r--  test/CodeGen/ARM/code-placement.ll | 5
-rw-r--r--  test/CodeGen/ARM/constantfp.ll | 12
-rw-r--r--  test/CodeGen/ARM/cortex-a57-misched-basic.ll | 6
-rw-r--r--  test/CodeGen/ARM/cortexr52-misched-basic.ll | 4
-rw-r--r--  test/CodeGen/ARM/ctor_order.ll | 2
-rw-r--r--  test/CodeGen/ARM/ctors_dtors.ll | 2
-rw-r--r--  test/CodeGen/ARM/cttz.ll | 4
-rw-r--r--  test/CodeGen/ARM/cttz_vector.ll | 64
-rw-r--r--  test/CodeGen/ARM/cxx-tlscc.ll | 2
-rw-r--r--  test/CodeGen/ARM/execute-only-big-stack-frame.ll | 6
-rw-r--r--  test/CodeGen/ARM/execute-only-section.ll | 6
-rw-r--r--  test/CodeGen/ARM/execute-only.ll | 6
-rw-r--r--  test/CodeGen/ARM/fp16-promote.ll | 29
-rw-r--r--  test/CodeGen/ARM/fp16-v3.ll | 4
-rw-r--r--  test/CodeGen/ARM/ifcvt7.ll | 2
-rw-r--r--  test/CodeGen/ARM/illegal-bitfield-loadstore.ll | 6
-rw-r--r--  test/CodeGen/ARM/indirectbr.ll | 4
-rw-r--r--  test/CodeGen/ARM/jump-table-islands.ll | 2
-rw-r--r--  test/CodeGen/ARM/jump-table-tbh.ll | 6
-rw-r--r--  test/CodeGen/ARM/ldm-stm-i256.ll | 20
-rw-r--r--  test/CodeGen/ARM/legalize-unaligned-load.ll | 2
-rw-r--r--  test/CodeGen/ARM/long-setcc.ll | 2
-rw-r--r--  test/CodeGen/ARM/long_shift.ll | 16
-rw-r--r--  test/CodeGen/ARM/misched-fusion-aes.ll | 6
-rw-r--r--  test/CodeGen/ARM/select_const.ll | 8
-rw-r--r--  test/CodeGen/ARM/shift-i64.ll | 2
-rw-r--r--  test/CodeGen/ARM/ssp-data-layout.ll | 2
-rw-r--r--  test/CodeGen/ARM/str_pre-2.ll | 2
-rw-r--r--  test/CodeGen/ARM/swifterror.ll | 52
-rw-r--r--  test/CodeGen/ARM/thumb2-it-block.ll | 4
-rw-r--r--  test/CodeGen/ARM/vcgt.ll | 4
-rw-r--r--  test/CodeGen/ARM/vector-DAGCombine.ll | 10
-rw-r--r--  test/CodeGen/ARM/vext.ll | 58
-rw-r--r--  test/CodeGen/ARM/vfp.ll | 4
-rw-r--r--  test/CodeGen/ARM/vld1.ll | 2
-rw-r--r--  test/CodeGen/ARM/vld2.ll | 16
-rw-r--r--  test/CodeGen/ARM/vld3.ll | 16
-rw-r--r--  test/CodeGen/ARM/vld4.ll | 24
-rw-r--r--  test/CodeGen/ARM/vlddup.ll | 54
-rw-r--r--  test/CodeGen/ARM/vldlane.ll | 2
-rw-r--r--  test/CodeGen/ARM/vpadd.ll | 22
-rw-r--r--  test/CodeGen/ARM/vst1.ll | 2
-rw-r--r--  test/CodeGen/ARM/vst4.ll | 8
-rw-r--r--  test/CodeGen/ARM/vstlane.ll | 6
-rw-r--r--  test/CodeGen/ARM/vuzp.ll | 269
-rw-r--r--  test/CodeGen/BPF/remove_truncate_1.ll | 87
-rw-r--r--  test/CodeGen/BPF/remove_truncate_2.ll | 65
-rw-r--r--  test/CodeGen/Hexagon/addrmode-keepdeadphis.mir | 30
-rw-r--r--  test/CodeGen/Hexagon/expand-condsets-undefvni.ll | 49
-rw-r--r--  test/CodeGen/Hexagon/expand-vselect-kill.ll | 53
-rw-r--r--  test/CodeGen/Hexagon/fpelim-basic.ll | 91
-rw-r--r--  test/CodeGen/Hexagon/frame.ll | 23
-rw-r--r--  test/CodeGen/Hexagon/jt-in-text.ll | 57
-rw-r--r--  test/CodeGen/Hexagon/newvaluejump-kill2.mir | 18
-rw-r--r--  test/CodeGen/Hexagon/newvaluejump2.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/regalloc-liveout-undef.mir | 35
-rw-r--r--  test/CodeGen/MIR/Generic/multiRunPass.mir | 3
-rw-r--r--  test/CodeGen/Mips/2008-06-05-Carry.ll | 13
-rw-r--r--  test/CodeGen/Mips/dsp-patterns.ll | 4
-rw-r--r--  test/CodeGen/Mips/llcarry.ll | 11
-rw-r--r--  test/CodeGen/Mips/llvm-ir/add.ll | 394
-rw-r--r--  test/CodeGen/Mips/llvm-ir/sub.ll | 174
-rw-r--r--  test/CodeGen/Mips/madd-msub.ll | 81
-rw-r--r--  test/CodeGen/NVPTX/lower-aggr-copies.ll | 4
-rw-r--r--  test/CodeGen/PowerPC/anon_aggr.ll | 64
-rw-r--r--  test/CodeGen/PowerPC/floatPSA.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll | 32
-rw-r--r--  test/CodeGen/PowerPC/memcmp.ll | 131
-rw-r--r--  test/CodeGen/PowerPC/memcmpIR.ll | 90
-rw-r--r--  test/CodeGen/PowerPC/merge_stores_dereferenceable.ll | 24
-rw-r--r--  test/CodeGen/PowerPC/ppc64-align-long-double.ll | 24
-rw-r--r--  test/CodeGen/PowerPC/tls.ll | 2
-rw-r--r--  test/CodeGen/PowerPC/tls_get_addr_fence1.mir | 66
-rw-r--r--  test/CodeGen/PowerPC/tls_get_addr_fence2.mir | 65
-rw-r--r--  test/CodeGen/Thumb/long-setcc.ll | 2
-rw-r--r--  test/CodeGen/Thumb2/constant-islands-new-island.ll | 6
-rw-r--r--  test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll | 154
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ifcvt2.ll | 3
-rw-r--r--  test/CodeGen/WebAssembly/exception.ll | 22
-rw-r--r--  test/CodeGen/X86/GlobalISel/and-scalar.ll | 43
-rw-r--r--  test/CodeGen/X86/GlobalISel/fadd-scalar.ll | 20
-rw-r--r--  test/CodeGen/X86/GlobalISel/fdiv-scalar.ll | 20
-rw-r--r--  test/CodeGen/X86/GlobalISel/fmul-scalar.ll | 20
-rw-r--r--  test/CodeGen/X86/GlobalISel/fsub-scalar.ll | 20
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir | 124
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir | 74
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir | 74
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir | 74
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir | 74
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir | 124
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir | 124
-rw-r--r--  test/CodeGen/X86/GlobalISel/or-scalar.ll | 43
-rw-r--r--  test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir | 235
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-add.mir | 80
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-and-scalar.mir | 160
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-constant.mir | 21
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir | 119
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir | 119
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir | 119
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir | 119
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-merge-vec256.mir | 52
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-merge-vec512.mir | 74
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-or-scalar.mir | 160
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-sub.mir | 77
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-xor-scalar.mir | 160
-rw-r--r--  test/CodeGen/X86/GlobalISel/xor-scalar.ll | 43
-rw-r--r--  test/CodeGen/X86/atom-call-reg-indirect.ll | 2
-rw-r--r--  test/CodeGen/X86/atom-fixup-lea2.ll | 2
-rw-r--r--  test/CodeGen/X86/atom-sched.ll | 1
-rw-r--r--  test/CodeGen/X86/avx2-arith.ll | 106
-rw-r--r--  test/CodeGen/X86/avx2-cmp.ll | 36
-rwxr-xr-x  test/CodeGen/X86/avx2-conversions.ll | 74
-rw-r--r--  test/CodeGen/X86/avx2-fma-fneg-combine.ll | 32
-rw-r--r--  test/CodeGen/X86/avx2-gather.ll | 28
-rw-r--r--  test/CodeGen/X86/avx2-logic.ll | 34
-rw-r--r--  test/CodeGen/X86/avx2-phaddsub.ll | 36
-rw-r--r--  test/CodeGen/X86/avx2-shift.ll | 170
-rw-r--r--  test/CodeGen/X86/avx2-vector-shifts.ll | 168
-rwxr-xr-x  test/CodeGen/X86/avx2-vperm.ll | 20
-rw-r--r--  test/CodeGen/X86/avx512-arith.ll | 258
-rw-r--r--  test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll | 404
-rw-r--r--  test/CodeGen/X86/avx512vl-vec-masked-cmp.ll | 13485
-rw-r--r--  test/CodeGen/X86/bswap-vector.ll | 11
-rw-r--r--  test/CodeGen/X86/bswap-wide-int.ll | 173
-rw-r--r--  test/CodeGen/X86/compress_expand.ll | 8
-rw-r--r--  test/CodeGen/X86/cpus.ll | 2
-rw-r--r--  test/CodeGen/X86/fp128-cast.ll | 16
-rw-r--r--  test/CodeGen/X86/insertelement-zero.ll | 15
-rw-r--r--  test/CodeGen/X86/lower-vec-shift.ll | 7
-rw-r--r--  test/CodeGen/X86/lower-vec-shuffle-bug.ll | 11
-rw-r--r--  test/CodeGen/X86/masked_memop.ll | 16
-rw-r--r--  test/CodeGen/X86/memcmp.ll | 782
-rw-r--r--  test/CodeGen/X86/palignr.ll | 259
-rw-r--r--  test/CodeGen/X86/peephole-recurrence.mir | 232
-rw-r--r--  test/CodeGen/X86/sbb.ll | 80
-rw-r--r--  test/CodeGen/X86/vector-shuffle-512-v16.ll | 57
-rw-r--r--  test/CodeGen/X86/vector-shuffle-512-v8.ll | 88
-rw-r--r--  test/CodeGen/X86/vector-truncate-combine.ll | 35
-rw-r--r--  test/CodeGen/X86/x86-interleaved-access.ll | 58
-rw-r--r--  test/DebugInfo/COFF/lines-bb-start.ll | 97
-rw-r--r--  test/DebugInfo/COFF/local-variables.ll | 2
-rw-r--r--  test/DebugInfo/PDB/pdbdump-headers.test | 128
-rw-r--r--  test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test | 2
-rw-r--r--  test/DebugInfo/dwarfdump-accel.test | 5
-rw-r--r--  test/Feature/optnone-opt.ll | 3
-rw-r--r--  test/Instrumentation/MemorySanitizer/msan_basic.ll | 4
-rw-r--r--  test/LTO/Resolution/X86/Inputs/comdat-mixed-lto.ll | 23
-rw-r--r--  test/LTO/Resolution/X86/comdat-mixed-lto.ll | 42
-rw-r--r--  test/MC/AArch64/coff-basic.ll | 8
-rw-r--r--  test/MC/AMDGPU/code-object-metadata-kernel-args.s | 4
-rw-r--r--  test/MC/AMDGPU/code-object-metadata-kernel-attrs.s | 4
-rw-r--r--  test/MC/AVR/out-of-range-fixups/adiw-fail.s | 5
-rw-r--r--  test/MC/AVR/out-of-range-fixups/in-fail.s | 5
-rw-r--r--  test/MC/AVR/out-of-range-fixups/lds-fail.s | 5
-rw-r--r--  test/MC/AVR/out-of-range-fixups/sbi-fail.s | 5
-rw-r--r--  test/MC/Disassembler/SystemZ/insns.txt | 2025
-rw-r--r--  test/MC/Mips/macro-dla-bad.s | 21
-rw-r--r--  test/MC/Mips/macro-dla-pic.s | 50
-rw-r--r--  test/MC/Mips/micromips64r6/valid.s | 4
-rw-r--r--  test/MC/Mips/mips3/valid.s | 4
-rw-r--r--  test/MC/Mips/mips4/valid.s | 4
-rw-r--r--  test/MC/Mips/mips5/valid.s | 4
-rw-r--r--  test/MC/Mips/mips64/valid.s | 4
-rw-r--r--  test/MC/Mips/mips64r2/valid.s | 4
-rw-r--r--  test/MC/Mips/mips64r3/valid.s | 4
-rw-r--r--  test/MC/Mips/mips64r5/valid.s | 4
-rw-r--r--  test/MC/Mips/mips64r6/valid.s | 10
-rw-r--r--  test/MC/SystemZ/insn-bad-z196.s | 21
-rw-r--r--  test/MC/SystemZ/insn-bad-zEC12.s | 14
-rw-r--r--  test/MC/SystemZ/insn-bad.s | 850
-rw-r--r--  test/MC/SystemZ/insn-good-z196.s | 180
-rw-r--r--  test/MC/SystemZ/insn-good-zEC12.s | 14
-rw-r--r--  test/MC/SystemZ/insn-good.s | 1411
-rw-r--r--  test/MC/SystemZ/regs-bad.s | 63
-rw-r--r--  test/MC/SystemZ/regs-good.s | 51
-rw-r--r--  test/MC/WebAssembly/unnamed-data.ll | 94
-rw-r--r--  test/MC/WebAssembly/weak-alias.ll | 35
-rw-r--r--  test/MC/WebAssembly/weak.ll | 36
-rw-r--r--  test/MC/X86/intel-syntax-bitwise-ops.s | 17
-rw-r--r--  test/MC/X86/signed-coff-pcrel.s | 12
-rw-r--r--  test/Object/X86/irsymtab-asm.ll | 17
-rw-r--r--  test/Object/X86/irsymtab-bad-alias.ll | 15
-rw-r--r--  test/Object/X86/irsymtab.ll | 33
-rw-r--r--  test/Object/X86/yaml-elf-x86-rel-broken.yaml | 29
-rw-r--r--  test/ObjectYAML/wasm/weak_symbols.yaml | 21
-rw-r--r--  test/Other/new-pm-defaults.ll | 1
-rw-r--r--  test/Other/new-pm-thinlto-defaults.ll | 13
-rw-r--r--  test/ThinLTO/X86/autoupgrade.ll | 2
-rw-r--r--  test/Transforms/BBVectorize/X86/cmp-types.ll | 16
-rw-r--r--  test/Transforms/BBVectorize/X86/loop1.ll | 61
-rw-r--r--  test/Transforms/BBVectorize/X86/pr15289.ll | 95
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-rec.ll | 54
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-rec2.ll | 85
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-rec3.ll | 170
-rw-r--r--  test/Transforms/BBVectorize/X86/sh-types.ll | 25
-rw-r--r--  test/Transforms/BBVectorize/X86/simple-int.ll | 79
-rw-r--r--  test/Transforms/BBVectorize/X86/simple-ldstr.ll | 29
-rw-r--r--  test/Transforms/BBVectorize/X86/simple.ll | 120
-rw-r--r--  test/Transforms/BBVectorize/X86/vs-cast.ll | 12
-rw-r--r--  test/Transforms/BBVectorize/X86/wr-aliases.ll | 144
-rw-r--r--  test/Transforms/BBVectorize/cycle.ll | 112
-rw-r--r--  test/Transforms/BBVectorize/func-alias.ll | 244
-rw-r--r--  test/Transforms/BBVectorize/ld1.ll | 41
-rw-r--r--  test/Transforms/BBVectorize/lit.local.cfg | 3
-rw-r--r--  test/Transforms/BBVectorize/loop1.ll | 93
-rw-r--r--  test/Transforms/BBVectorize/mem-op-depth.ll | 22
-rw-r--r--  test/Transforms/BBVectorize/metadata.ll | 49
-rw-r--r--  test/Transforms/BBVectorize/no-ldstr-conn.ll | 23
-rw-r--r--  test/Transforms/BBVectorize/req-depth.ll | 17
-rw-r--r--  test/Transforms/BBVectorize/search-limit.ll | 46
-rw-r--r--  test/Transforms/BBVectorize/simple-int.ll | 506
-rw-r--r--  test/Transforms/BBVectorize/simple-ldstr-ptrs.ll | 134
-rw-r--r--  test/Transforms/BBVectorize/simple-ldstr.ll | 170
-rw-r--r--  test/Transforms/BBVectorize/simple-sel.ll | 59
-rw-r--r--  test/Transforms/BBVectorize/simple-tst.ll | 18
-rw-r--r--  test/Transforms/BBVectorize/simple.ll | 199
-rw-r--r--  test/Transforms/BBVectorize/simple3.ll | 35
-rw-r--r--  test/Transforms/BBVectorize/vector-sel.ll | 33
-rw-r--r--  test/Transforms/BBVectorize/xcore/no-vector-registers.ll | 18
-rw-r--r--  test/Transforms/CodeExtractor/BlockAddressReference.ll | 36
-rw-r--r--  test/Transforms/CodeExtractor/BlockAddressSelfReference.ll | 50
-rw-r--r--  test/Transforms/CodeGenPrepare/X86/memcmp.ll | 126
-rw-r--r--  test/Transforms/CodeGenPrepare/nonintegral.ll | 68
-rw-r--r--  test/Transforms/ConstantHoisting/ARM/gep-struct-index.ll | 37
-rw-r--r--  test/Transforms/Inline/AArch64/inline-target-attr.ll | 40
-rw-r--r--  test/Transforms/Inline/inline-cold-callsite-pgo.ll | 54
-rw-r--r--  test/Transforms/Inline/inline-cold-callsite.ll | 79
-rw-r--r--  test/Transforms/Inline/optimization-remarks-yaml.ll | 18
-rw-r--r--  test/Transforms/Inline/pr33637.ll | 25
-rw-r--r--  test/Transforms/InstCombine/and-or-not.ll | 96
-rw-r--r--  test/Transforms/InstCombine/clamp-to-minmax.ll | 500
-rw-r--r--  test/Transforms/InstCombine/extractinsert-tbaa.ll | 45
-rw-r--r--  test/Transforms/InstCombine/ffs-1.ll | 12
-rw-r--r--  test/Transforms/InstCombine/icmp.ll | 80
-rw-r--r--  test/Transforms/InstCombine/logical-select.ll | 26
-rw-r--r--  test/Transforms/InstCombine/max-of-nots.ll | 17
-rw-r--r--  test/Transforms/InstCombine/memmove.ll | 48
-rw-r--r--  test/Transforms/InstCombine/memset.ll | 17
-rw-r--r--  test/Transforms/InstCombine/mul.ll | 28
-rw-r--r--  test/Transforms/InstCombine/or-xor.ll | 76
-rw-r--r--  test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll | 23
-rw-r--r--  test/Transforms/InstCombine/select-with-bitwise-ops.ll | 36
-rw-r--r--  test/Transforms/InstCombine/select.ll | 26
-rw-r--r--  test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll | 25
-rw-r--r--  test/Transforms/JumpThreading/range-compare.ll | 125
-rw-r--r--  test/Transforms/LICM/dropped-tbaa.ll | 90
-rw-r--r--  test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll | 169
-rw-r--r--  test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll | 279
-rw-r--r--  test/Transforms/LoopUnroll/unroll-maxcount.ll | 31
-rw-r--r--  test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll | 31
-rw-r--r--  test/Transforms/LoopVectorize/first-order-recurrence.ll | 139
-rw-r--r--  test/Transforms/LoopVectorize/if-conversion.ll | 2
-rw-r--r--  test/Transforms/LoopVectorize/minmax_reduction.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/small-loop.ll | 6
-rw-r--r--  test/Transforms/LowerTypeTests/export-icall.ll | 9
-rw-r--r--  test/Transforms/Reassociate/erase_inst_made_change.ll | 29
-rw-r--r--  test/Transforms/SLPVectorizer/X86/limit.ll | 70
-rw-r--r--  test/Transforms/SROA/alloca-address-space.ll | 31
-rw-r--r--  test/Transforms/SROA/preserve-nonnull.ll | 94
-rw-r--r--  test/Transforms/SimplifyCFG/Hexagon/lit.local.cfg | 5
-rw-r--r--  test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll | 62
-rw-r--r--  test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll | 2
-rw-r--r--  test/Transforms/SimplifyCFG/X86/switch-table-bug.ll | 2
-rw-r--r--  test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll | 32
-rw-r--r--  test/tools/llvm-cvtres/symbols.test | 33
-rw-r--r--  test/tools/llvm-dwarfdump/X86/apple_names_verify_buckets.s | 192
-rw-r--r--  test/tools/llvm-dwarfdump/X86/apple_names_verify_data.s | 64
-rw-r--r--  test/tools/llvm-dwarfdump/X86/apple_names_verify_form.s | 58
-rw-r--r--  test/tools/llvm-dwarfdump/X86/apple_names_verify_num_atoms.s | 59
-rw-r--r--  test/tools/llvm-dwarfdump/X86/no_apple_names_verify.s | 33
-rw-r--r--  test/tools/llvm-dwarfdump/X86/no_apple_names_verify_buckets.s | 174
-rw-r--r--  test/tools/llvm-nm/X86/demangle.ll | 37
-rw-r--r--  test/tools/llvm-nm/wasm/weak-symbols.yaml | 2
-rw-r--r--  test/tools/llvm-objdump/ARM/invalid-instruction.s | 9
-rw-r--r--  test/tools/llvm-objdump/WebAssembly/lit.local.cfg | 2
-rw-r--r--  test/tools/llvm-objdump/WebAssembly/relocations.test | 8
-rw-r--r--  test/tools/llvm-pdbdump/partial-type-stream.test | 30
-rw-r--r--  test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm64 | bin 0 -> 141 bytes
-rw-r--r--  test/tools/llvm-readobj/file-headers.test | 18
-rw-r--r--  test/tools/llvm-readobj/peplus.test | 1
-rw-r--r--  test/tools/llvm-readobj/symbols.test | 4
350 files changed, 29560 insertions, 6640 deletions
diff --git a/test/Analysis/Dominators/2006-10-02-BreakCritEdges.ll b/test/Analysis/Dominators/2006-10-02-BreakCritEdges.ll
index a8013176977da..c036fe22ab87e 100644
--- a/test/Analysis/Dominators/2006-10-02-BreakCritEdges.ll
+++ b/test/Analysis/Dominators/2006-10-02-BreakCritEdges.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -passes='require<domtree>,break-crit-edges,print<domtree>' -disable-output 2>&1| FileCheck %s
; PR932
-; CHECK: [3] %brtrue {1,2}
+; CHECK: [3] %brtrue {{{[0-9]+}},{{[0-9]+}}}
declare void @use1(i32)
diff --git a/test/Analysis/ScalarEvolution/limit-depth.ll b/test/Analysis/ScalarEvolution/limit-depth.ll
index 5a35bfefd20a0..f4154130233b0 100644
--- a/test/Analysis/ScalarEvolution/limit-depth.ll
+++ b/test/Analysis/ScalarEvolution/limit-depth.ll
@@ -1,4 +1,4 @@
-; RUN: opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution < %s | FileCheck %s
+; RUN: opt -scalar-evolution-max-arith-depth=0 -scalar-evolution-max-ext-depth=0 -analyze -scalar-evolution < %s | FileCheck %s
; Check that depth set to 0 prevents getAddExpr and getMulExpr from making
; transformations in SCEV. We expect the result to be very straightforward.
@@ -42,3 +42,59 @@ define void @test_mul(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
%s2 = mul i32 %s1, %p3
ret void
}
+
+define void @test_sext(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
+; CHECK-LABEL: @test_sext
+; CHECK: %se2 = sext i64 %iv2.inc to i128
+; CHECK-NEXT: --> {(1 + (sext i64 {(sext i32 (1 + %a) to i64),+,1}<nsw><%loop> to i128))<nsw>,+,1}<nsw><%loop2>
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ %a, %entry ], [ %iv.inc, %loop ]
+ %iv.inc = add nsw i32 %iv, 1
+ %cond = icmp sle i32 %iv.inc, 50
+ br i1 %cond, label %loop, label %between
+
+between:
+ %se = sext i32 %iv.inc to i64
+ br label %loop2
+
+loop2:
+ %iv2 = phi i64 [ %se, %between ], [ %iv2.inc, %loop2 ]
+ %iv2.inc = add nsw i64 %iv2, 1
+ %cond2 = icmp sle i64 %iv2.inc, 50
+ br i1 %cond2, label %loop2, label %exit
+
+exit:
+ %se2 = sext i64 %iv2.inc to i128
+ ret void
+}
+
+define void @test_zext(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
+; CHECK-LABEL: @test_zext
+; CHECK: %ze2 = zext i64 %iv2.inc to i128
+; CHECK-NEXT: --> {(1 + (zext i64 {7,+,1}<nuw><nsw><%loop> to i128))<nuw><nsw>,+,1}<nuw><%loop2>
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 6, %entry ], [ %iv.inc, %loop ]
+ %iv.inc = add nsw i32 %iv, 1
+ %cond = icmp sle i32 %iv.inc, 50
+ br i1 %cond, label %loop, label %between
+
+between:
+ %ze = zext i32 %iv.inc to i64
+ br label %loop2
+
+loop2:
+ %iv2 = phi i64 [ %ze, %between ], [ %iv2.inc, %loop2 ]
+ %iv2.inc = add nuw i64 %iv2, 1
+ %cond2 = icmp sle i64 %iv2.inc, 50
+ br i1 %cond2, label %loop2, label %exit
+
+exit:
+ %ze2 = zext i64 %iv2.inc to i128
+ ret void
+}
diff --git a/test/Bitcode/thinlto-alias.ll b/test/Bitcode/thinlto-alias.ll
index 2c235f0620ecb..81fbb767ba941 100644
--- a/test/Bitcode/thinlto-alias.ll
+++ b/test/Bitcode/thinlto-alias.ll
@@ -18,7 +18,7 @@
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'mainanalias'
+; CHECK-NEXT: blob data = 'mainanalias{{.*}}'
; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
; COMBINED-NEXT: <VERSION
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll b/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll
index 7f9d6d95f506b..ef5fb36d8e05f 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-pgo.ll
@@ -20,7 +20,7 @@
; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=1 op5=2/>
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'mainfunc'
+; CHECK-NEXT: blob data = 'mainfunc{{.*}}'
; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
; COMBINED-NEXT: <VERSION
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
index b64d5bd52bfca..a4d259add6093 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
@@ -33,7 +33,7 @@
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'hot_functionhot1hot2hot3hot4coldnone1none2none3'
+; CHECK-NEXT: blob data = 'hot_functionhot1hot2hot3hot4coldnone1none2none3{{.*}}'
; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
; COMBINED-NEXT: <VERSION
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
index 875f397646a65..b62090efe20b9 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-sample-profile-summary.ll
@@ -33,7 +33,7 @@
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'hot_functionhot1hot2hot3hot4coldnone1none2none3'
+; CHECK-NEXT: blob data = 'hot_functionhot1hot2hot3hot4coldnone1none2none3{{.*}}'
; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
; COMBINED-NEXT: <VERSION
diff --git a/test/Bitcode/thinlto-function-summary-callgraph.ll b/test/Bitcode/thinlto-function-summary-callgraph.ll
index 566f3a077e7bf..749909badd956 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph.ll
@@ -21,7 +21,7 @@
; CHECK-NEXT: <PERMODULE {{.*}} op3=1
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'undefinedglobmainfunc'
+; CHECK-NEXT: blob data = 'undefinedglobmainfunc{{.*}}'
; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
diff --git a/test/Bitcode/thinlto-function-summary-refgraph.ll b/test/Bitcode/thinlto-function-summary-refgraph.ll
index b52fce7917911..47a44b7892238 100644
--- a/test/Bitcode/thinlto-function-summary-refgraph.ll
+++ b/test/Bitcode/thinlto-function-summary-refgraph.ll
@@ -62,7 +62,7 @@
; CHECK: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'barglobalvarfuncfunc2foofunc3WXYZllvm.ctpop.i8main'
+; CHECK-NEXT: blob data = 'barglobalvarfuncfunc2foofunc3WXYZllvm.ctpop.i8main{{.*}}'
; ModuleID = 'thinlto-function-summary-refgraph.ll'
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Bitcode/thinlto-function-summary.ll b/test/Bitcode/thinlto-function-summary.ll
index 6b8bfbb292cd2..5922a8b3c4d0b 100644
--- a/test/Bitcode/thinlto-function-summary.ll
+++ b/test/Bitcode/thinlto-function-summary.ll
@@ -24,7 +24,7 @@
; BC-NEXT: <ALIAS {{.*}} op0=5 op1=0 op2=3
; BC-NEXT: </GLOBALVAL_SUMMARY_BLOCK
; BC: <STRTAB_BLOCK
-; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicf'
+; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicf{{.*}}'
; RUN: opt -name-anon-globals -module-summary < %s | llvm-dis | FileCheck %s
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 0298315a55105..48f500eb36b50 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -158,15 +158,30 @@ define fp128 @test_quad_dump() {
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
+@var = global <2 x i16*> zeroinitializer
define void @vector_of_pointers_extractelement() {
- %dummy = extractelement <2 x i16*> undef, i32 0
+ br label %end
+
+block:
+ %dummy = extractelement <2 x i16*> %vec, i32 0
ret void
+
+end:
+ %vec = load <2 x i16*>, <2 x i16*>* undef
+ br label %block
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(<2 x p0>) = G_INSERT_VECTOR_ELT %vreg1, %vreg2, %vreg3; (in function: vector_of_pointers_insertelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
- %dummy = insertelement <2 x i16*> undef, i16* null, i32 0
+ br label %end
+
+block:
+ %dummy = insertelement <2 x i16*> %vec, i16* null, i32 0
ret void
+
+end:
+ %vec = load <2 x i16*>, <2 x i16*>* undef
+ br label %block
}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 81b42d0648105..50ad83feed859 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -577,7 +577,7 @@ define i32 @constant_int_start() {
}
; CHECK-LABEL: name: test_undef
-; CHECK: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
; CHECK: %w0 = COPY [[UNDEF]]
define i32 @test_undef() {
ret i32 undef
@@ -807,7 +807,7 @@ define float @test_frem(float %arg1, float %arg2) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SADDO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -824,7 +824,7 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[ZERO:%[0-9]+]](s1) = G_CONSTANT i1 false
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_UADDE [[LHS]], [[RHS]], [[ZERO]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -840,7 +840,7 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SSUBO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -857,7 +857,7 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[ZERO:%[0-9]+]](s1) = G_CONSTANT i1 false
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_USUBE [[LHS]], [[RHS]], [[ZERO]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -873,7 +873,7 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SMULO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -889,7 +889,7 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_UMULO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -1271,6 +1271,45 @@ define float @test_fma_intrin(float %a, float %b, float %c) {
ret float %res
}
+declare float @llvm.exp.f32(float)
+define float @test_exp_intrin(float %a) {
+; CHECK-LABEL: name: test_exp_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FEXP [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.exp.f32(float %a)
+ ret float %res
+}
+
+declare float @llvm.exp2.f32(float)
+define float @test_exp2_intrin(float %a) {
+; CHECK-LABEL: name: test_exp2_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FEXP2 [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.exp2.f32(float %a)
+ ret float %res
+}
+
+declare float @llvm.log.f32(float)
+define float @test_log_intrin(float %a) {
+; CHECK-LABEL: name: test_log_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FLOG [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.log.f32(float %a)
+ ret float %res
+}
+
+declare float @llvm.log2.f32(float)
+define float @test_log2_intrin(float %a) {
+; CHECK-LABEL: name: test_log2_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FLOG2 [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.log2.f32(float %a)
+ ret float %res
+}
declare void @llvm.lifetime.start.p0i8(i64, i8*)
declare void @llvm.lifetime.end.p0i8(i64, i8*)
define void @test_lifetime_intrin() {
@@ -1464,7 +1503,7 @@ define float @test_different_call_conv_target(float %x) {
define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
; CHECK-LABEL: name: test_shufflevector_s32_v2s32
; CHECK: [[ARG:%[0-9]+]](s32) = COPY %w0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
@@ -1477,7 +1516,7 @@ define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v2s32_s32
; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK: [[RES:%[0-9]+]](s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[C1]](s32)
; CHECK: %w0 = COPY [[RES]](s32)
@@ -1489,7 +1528,7 @@ define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32
; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32)
@@ -1502,7 +1541,7 @@ define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
@@ -1531,7 +1570,7 @@ define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg
define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
; CHECK: [[ARG:%[0-9]+]](<4 x s32>) = COPY %q0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
@@ -1570,7 +1609,7 @@ define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2)
}
; CHECK-LABEL: test_constant_vector
-; CHECK: [[UNDEF:%[0-9]+]](s16) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s16) = G_IMPLICIT_DEF
; CHECK: [[F:%[0-9]+]](s16) = G_FCONSTANT half 0xH3C00
; CHECK: [[M:%[0-9]+]](<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
; CHECK: %d0 = COPY [[M]](<4 x s16>)
diff --git a/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index 0e593fdb7b852..8fba8e09f9ffa 100644
--- a/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -64,7 +64,7 @@ define void @test_multiple_args(i64 %in) {
; CHECK: [[I8:%[0-9]+]](s8) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
-; CHECK: [[UNDEF:%[0-9]+]](s192) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s192) = G_IMPLICIT_DEF
; CHECK: [[ARG0:%[0-9]+]](s192) = G_INSERT [[UNDEF]], [[DBL]](s64), 0
; CHECK: [[ARG1:%[0-9]+]](s192) = G_INSERT [[ARG0]], [[I64]](s64), 64
; CHECK: [[ARG2:%[0-9]+]](s192) = G_INSERT [[ARG1]], [[I8]](s8), 128
diff --git a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
index ef4445111d7b2..d9fec0ec7d46b 100644
--- a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -19,7 +19,7 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK: [[BAD]] (landing-pad):
; CHECK: EH_LABEL
-; CHECK: [[UNDEF:%[0-9]+]](s128) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s128) = G_IMPLICIT_DEF
; CHECK: [[PTR:%[0-9]+]](p0) = COPY %x0
; CHECK: [[VAL_WITH_PTR:%[0-9]+]](s128) = G_INSERT [[UNDEF]], [[PTR]](p0), 0
; CHECK: [[SEL_PTR:%[0-9]+]](p0) = COPY %x1
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir b/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
index e3e0175d39ac5..fbacc28d7434e 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
@@ -57,11 +57,11 @@ body: |
%0:_(s64) = COPY %x0
; CHECK-LABEL: name: test_combines_4
- ; CHECK: %2(<2 x s32>) = G_EXTRACT %1(s128), 0
- ; CHECK: %3(<2 x s32>) = G_ADD %2, %2
+ ; CHECK: %2(s64) = COPY %0(s64)
+ ; CHECK: %3(s64) = G_ADD %2, %2
%1:_(s128) = G_MERGE_VALUES %0, %0
- %2:_(<2 x s32>) = G_EXTRACT %1, 0
- %3:_(<2 x s32>) = G_ADD %2, %2
+ %2:_(s64) = G_EXTRACT %1, 0
+ %3:_(s64) = G_ADD %2, %2
...
---
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index 23e7d5163e5a5..42ca367e122bb 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -22,12 +22,11 @@ declare void @_Unwind_Resume(i8*)
; CHECK: [[SEL:%[0-9]+]](s32) = G_PTRTOINT [[SEL_PTR]]
; CHECK: [[STRUCT_SEL:%[0-9]+]](s64) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 0
-; CHECK: [[STRUCT:%[0-9]+]](s128) = G_MERGE_VALUES [[STRUCT_PTR]](s64), [[STRUCT_SEL]]
-
-; CHECK: [[PTR:%[0-9]+]](p0) = G_EXTRACT [[STRUCT]](s128), 0
+; CHECK: [[PTR:%[0-9]+]](p0) = G_INTTOPTR [[STRUCT_PTR]](s64)
; CHECK: G_STORE [[PTR]](p0), {{%[0-9]+}}(p0)
-; CHECK: [[SEL:%[0-9]+]](s32) = G_EXTRACT [[STRUCT]](s128), 64
+; CHECK: [[SEL_TMP:%[0-9]+]](s32) = G_EXTRACT [[STRUCT_SEL]](s64), 0
+; CHECK: [[SEL:%[0-9]+]](s32) = COPY [[SEL_TMP]]
; CHECK: G_STORE [[SEL]](s32), {{%[0-9]+}}(p0)
define void @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir b/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir
new file mode 100644
index 0000000000000..dc6b59b24a9ae
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir
@@ -0,0 +1,85 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_extracts_1
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; The low part of the extraction takes the entirety of the low register, so
+ ; the value stored is forwarded directly from the first load.
+
+ ; CHECK-LABEL: name: test_extracts_1
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: {{%[0-9]+}}(s64) = G_LOAD
+ ; CHECK: [[VAL:%[0-9]+]](s64) = COPY [[LO]]
+ ; CHECK: G_STORE [[VAL]]
+ %0:_(s64) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s64) = G_EXTRACT %3(s128), 0
+ G_STORE %4(s64), %2(p0) :: (store 8)
+ RET_ReallyLR
+...
+
+---
+name: test_extracts_2
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; Low extraction takes the whole low register. High extraction is real.
+ ; CHECK-LABEL: name: test_extracts_2
+ ; CHECK: [[LO_TMP:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[LO:%[0-9]+]](s64) = COPY [[LO_TMP]]
+ ; CHECK: [[NEWHI_TMP:%[0-9]+]](s32) = G_EXTRACT [[HI]](s64), 0
+ ; CHECK: [[NEWHI:%[0-9]+]](s32) = COPY [[NEWHI_TMP]]
+ ; CHECK: G_STORE [[LO]]
+ ; CHECK: G_STORE [[NEWHI]]
+ %0:_(s64) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s64) = G_EXTRACT %3(s128), 0
+ %5:_(s32) = G_EXTRACT %3(s128), 64
+ G_STORE %4(s64), %2(p0) :: (store 8)
+ G_STORE %5(s32), %2(p0) :: (store 4)
+ RET_ReallyLR
+...
+
+---
+name: test_extracts_3
+body: |
+ bb.0:
+ liveins: %x0, %x1, %x2
+
+
+ ; CHECK-LABEL: name: test_extracts_3
+ ; CHECK: [[LO:%[0-9]+]](s32) = G_EXTRACT %0(s64), 32
+ ; CHECK: [[HI:%[0-9]+]](s32) = G_EXTRACT %1(s64), 0
+ ; CHECK: %3(s64) = G_MERGE_VALUES [[LO]](s32), [[HI]](s32)
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+ %2:_(s128) = G_MERGE_VALUES %0, %1
+ %3:_(s64) = G_EXTRACT %2, 32
+ RET_ReallyLR
+...
+
+---
+name: test_extracts_4
+body: |
+ bb.0:
+ liveins: %x0, %x1, %x2
+
+
+ ; CHECK-LABEL: name: test_extracts_4
+ ; CHECK: [[LO_TMP:%[0-9]+]](s32) = G_EXTRACT %0(s64), 32
+ ; CHECK: %3(s32) = COPY [[LO_TMP]]
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+ %2:_(s128) = G_MERGE_VALUES %0, %1
+ %3:_(s32) = G_EXTRACT %2, 32
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir b/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
new file mode 100644
index 0000000000000..e7cf59b3394e3
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
@@ -0,0 +1,15 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_implicit_def
+registers:
+body: |
+ bb.0.entry:
+ liveins:
+ ; CHECK-LABEL: name: test_implicit_def
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_IMPLICIT_DEF
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_IMPLICIT_DEF
+ ; CHECK: %0(s128) = G_MERGE_VALUES [[LO]](s64), [[HI]](s64)
+
+ %0:_(s128) = G_IMPLICIT_DEF
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-trunc.mir b/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
index 5559e2d3a0d12..f43a9ab34ffd2 100644
--- a/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
+++ b/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
@@ -15,8 +15,8 @@ legalized: true
regBankSelected: true
# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 0, class: gpr64sp, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr32sp, preferred-register: '' }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index 2682fa7dcce1b..a910585e7f5db 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -108,9 +108,9 @@ if.end: ; preds = %if.then, %lor.lhs.f
; CHECK: cmp w0, #1
; CHECK: sdiv [[DIVRES:w[0-9]+]], w1, w0
; CHECK: ccmp [[DIVRES]], #16, #0, ge
-; CHECK: b.gt [[BLOCK:LBB[0-9_]+]]
-; CHECK: bl _foo
+; CHECK: b.le [[BLOCK:LBB[0-9_]+]]
; CHECK: [[BLOCK]]:
+; CHECK: bl _foo
; CHECK: orr w0, wzr, #0x7
define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
entry:
@@ -135,7 +135,7 @@ if.end:
; CHECK: cmp
; CHECK-NOT: b.
; CHECK: fccmp {{.*}}, #8, ge
-; CHECK: b.lt
+; CHECK: b.ge
define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
entry:
%cmp = icmp sgt i32 %a, 0
diff --git a/test/CodeGen/AArch64/arm64-spill-remarks.ll b/test/CodeGen/AArch64/arm64-spill-remarks.ll
index bc9340352d754..cfebeb496e18f 100644
--- a/test/CodeGen/AArch64/arm64-spill-remarks.ll
+++ b/test/CodeGen/AArch64/arm64-spill-remarks.ll
@@ -3,6 +3,15 @@
; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple 2>&1 | FileCheck -check-prefix=NO_REMARK %s
; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-output=%t.yaml -pass-remarks-with-hotness 2>&1 | FileCheck -check-prefix=NO_REMARK %s
; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
+;
+; Verify that remarks below the hotness threshold are not output.
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-missed=regalloc \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold=500 \
+; RUN: 2>&1 | FileCheck -check-prefix=THRESHOLD %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-output=%t.threshold.yaml \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold=500 \
+; RUN: 2>&1 | FileCheck -check-prefix=NO_REMARK %s
+; RUN: cat %t.threshold.yaml | FileCheck -check-prefix=THRESHOLD_YAML %s
; This has two nested loops, each with one value that has to be spilled and
; then reloaded.
@@ -23,6 +32,9 @@
; NO_REMARK-NOT: remark
+; THRESHOLD-NOT: (hotness: 300)
+; THRESHOLD: remark: /tmp/kk.c:2:20: 1 spills 1 reloads generated in loop (hotness: 30000)
+
; YAML: --- !Missed
; YAML: Pass: regalloc
; YAML: Name: LoopSpillReload
@@ -63,6 +75,21 @@
; YAML: - String: generated in loop
; YAML: ...
+; THRESHOLD_YAML-NOT: Hotness: 300{{$}}
+; THRESHOLD_YAML: --- !Missed
+; THRESHOLD_YAML: Pass: regalloc
+; THRESHOLD_YAML: Name: LoopSpillReload
+; THRESHOLD_YAML: DebugLoc: { File: /tmp/kk.c, Line: 2, Column: 20 }
+; THRESHOLD_YAML: Function: fpr128
+; THRESHOLD_YAML: Hotness: 30000
+; THRESHOLD_YAML: Args:
+; THRESHOLD_YAML: - NumSpills: '1'
+; THRESHOLD_YAML: - String: ' spills '
+; THRESHOLD_YAML: - NumReloads: '1'
+; THRESHOLD_YAML: - String: ' reloads '
+; THRESHOLD_YAML: - String: generated in loop
+; THRESHOLD_YAML: ...
+
define void @fpr128(<4 x float>* %p) nounwind ssp !prof !11 {
entry:
br label %loop, !dbg !8
diff --git a/test/CodeGen/AArch64/ccmp-successor-probs.mir b/test/CodeGen/AArch64/ccmp-successor-probs.mir
new file mode 100644
index 0000000000000..8e81c419841be
--- /dev/null
+++ b/test/CodeGen/AArch64/ccmp-successor-probs.mir
@@ -0,0 +1,46 @@
+# RUN: llc -o - %s -mtriple=aarch64--linux-gnu -mcpu=falkor -run-pass=aarch64-ccmp | FileCheck %s
+---
+# This test checks that successor probabilities are properly updated after a
+# ccmp-conversion.
+#
+# CHECK-LABEL: name: aarch64-ccmp-successor-probs
+# CHECK: bb.0:
+# CHECK-NEXT: successors: %bb.2(0x04000000), %bb.3(0x7c000000)
+# CHECK: CCMPXr %5, %4, 0, 10, implicit-def %nzcv, implicit %nzcv
+#
+name: aarch64-ccmp-successor-probs
+registers:
+ - { id: 0, class: gpr64 }
+ - { id: 1, class: gpr64 }
+ - { id: 2, class: gpr64 }
+ - { id: 3, class: gpr64 }
+ - { id: 4, class: gpr64 }
+ - { id: 5, class: gpr64 }
+ - { id: 6, class: gpr64 }
+ - { id: 7, class: gpr64 }
+body : |
+ bb.0:
+ successors: %bb.1(0x7e000000), %bb.2(0x02000000)
+
+ %0 = LDRXui killed %x0, 69
+ %1 = COPY %xzr
+ %2 = SUBSXrr %1, %0, implicit-def dead %nzcv
+ %3 = SUBSXri %x1, 1, 0, implicit-def dead %nzcv
+ %4 = COPY %0
+ %5 = COPY %3
+ %6 = SUBSXrr %x1, killed %2, implicit-def %nzcv
+ Bcc 11, %bb.2, implicit %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2(0x02082082), %bb.3(0x7df7df7e)
+
+ %7 = SUBSXrr %5, %4, implicit-def %nzcv
+ Bcc 12, %bb.2, implicit %nzcv
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3(0x80000000)
+
+ bb.3:
+...
diff --git a/test/CodeGen/AArch64/cond-br-tuning.ll b/test/CodeGen/AArch64/cond-br-tuning.ll
index 628d89e34a017..d966acbebfddb 100644
--- a/test/CodeGen/AArch64/cond-br-tuning.ll
+++ b/test/CodeGen/AArch64/cond-br-tuning.ll
@@ -83,7 +83,7 @@ L2:
; CHECK-LABEL: test_add_tbz:
; CHECK: adds
-; CHECK: b.ge
+; CHECK: b.pl
; CHECK: ret
define void @test_add_tbz(i32 %a, i32 %b, i32* %ptr) {
entry:
@@ -99,7 +99,7 @@ L2:
; CHECK-LABEL: test_subs_tbz:
; CHECK: subs
-; CHECK: b.ge
+; CHECK: b.pl
; CHECK: ret
define void @test_subs_tbz(i32 %a, i32 %b, i32* %ptr) {
entry:
@@ -115,7 +115,7 @@ L2:
; CHECK-LABEL: test_add_tbnz
; CHECK: adds
-; CHECK: b.lt
+; CHECK: b.mi
; CHECK: ret
define void @test_add_tbnz(i32 %a, i32 %b, i32* %ptr) {
entry:
@@ -131,7 +131,7 @@ L2:
; CHECK-LABEL: test_subs_tbnz
; CHECK: subs
-; CHECK: b.lt
+; CHECK: b.mi
; CHECK: ret
define void @test_subs_tbnz(i32 %a, i32 %b, i32* %ptr) {
entry:
diff --git a/test/CodeGen/AMDGPU/alignbit-pat.ll b/test/CodeGen/AMDGPU/alignbit-pat.ll
new file mode 100644
index 0000000000000..ff5c8960fad36
--- /dev/null
+++ b/test/CodeGen/AMDGPU/alignbit-pat.ll
@@ -0,0 +1,100 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}alignbit_shr_pat:
+; GCN-DAG: s_load_dword s[[SHR:[0-9]+]]
+; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]]
+
+define amdgpu_kernel void @alignbit_shr_pat(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 31
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_v:
+; GCN-DAG: load_dword v[[SHR:[0-9]+]],
+; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]]
+
+define amdgpu_kernel void @alignbit_shr_pat_v(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
+ %tmp = load i64, i64 addrspace(1)* %gep1, align 8
+ %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tid
+ %amt = load i32, i32 addrspace(1)* %gep2, align 4
+ %tmp3 = and i32 %amt, 31
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %gep2, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and30:
+; Negative test, wrong constant
+; GCN: v_lshr_b64
+; GCN-NOT: v_alignbit_b32
+
+define amdgpu_kernel void @alignbit_shr_pat_wrong_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 30
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and63:
+; Negative test, wrong constant
+; GCN: v_lshr_b64
+; GCN-NOT: v_alignbit_b32
+
+define amdgpu_kernel void @alignbit_shr_pat_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 63
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_const30:
+; GCN: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], 30
+
+define amdgpu_kernel void @alignbit_shr_pat_const30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp5 = lshr i64 %tmp, 30
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_const33:
+; Negative test, shift amount more than 31
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; GCN-NOT: v_alignbit_b32
+
+define amdgpu_kernel void @alignbit_shr_pat_wrong_const33(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp5 = lshr i64 %tmp, 33
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/bug-vopc-commute.ll b/test/CodeGen/AMDGPU/bug-vopc-commute.ll
index 7c02d8385462f..e951b5e089279 100644
--- a/test/CodeGen/AMDGPU/bug-vopc-commute.ll
+++ b/test/CodeGen/AMDGPU/bug-vopc-commute.ll
@@ -8,8 +8,8 @@
; of which were in SGPRs.
define amdgpu_vs float @main(i32 %v) {
main_body:
- %d1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 960)
- %d2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 976)
+ %d1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 960)
+ %d2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 976)
br i1 undef, label %ENDIF56, label %IF57
IF57: ; preds = %ENDIF
@@ -41,7 +41,7 @@ ENDIF62: ; preds = %ENDIF59
}
; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #0
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #0
attributes #0 = { nounwind readnone }
attributes #1 = { readnone }
diff --git a/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
index 53adf09026ec5..04ad3bcccd3f3 100644
--- a/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
+++ b/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
@@ -176,14 +176,13 @@ ret:
; OPT: ret
; GCN-LABEL: {{^}}sink_ubfe_i64_span_midpoint:
-; GCN: s_cbranch_scc1 BB3_2
-; GCN: s_lshr_b64 s{{\[}}[[LO:[0-9]+]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, 30
-; GCN: s_and_b32 s{{[0-9]+}}, s[[LO]], 0xff
+; GCN: v_alignbit_b32 v[[LO:[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}, 30
+; GCN: s_cbranch_scc1 BB3_2
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]]
; GCN: BB3_2:
-; GCN: s_lshr_b64 s{{\[}}[[LO:[0-9]+]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, 30
-; GCN: s_and_b32 s{{[0-9]+}}, s[[LO]], 0x7f
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7f, v[[LO]]
; GCN: BB3_3:
; GCN: buffer_store_dwordx2
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
index a68ddabd95609..37fd08242fbaa 100644
--- a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
+++ b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
@@ -16,7 +16,9 @@
; CHECK: ---
; CHECK: Version: [ 1, 0 ]
-; CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+; CHECK: Printf:
+; CHECK: - '1:1:4:%d\n'
+; CHECK: - '2:1:8:%g\n'
; CHECK: Kernels:
; CHECK: - Name: test_char
@@ -1253,8 +1255,8 @@ define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
; NOTES-NEXT: Owner Data size Description
; NOTES-NEXT: AMD 0x00000008 Unknown note type: (0x00000001)
; NOTES-NEXT: AMD 0x0000001b Unknown note type: (0x00000003)
-; GFX700: AMD 0x00008b06 Unknown note type: (0x0000000a)
-; GFX800: AMD 0x00008e6a Unknown note type: (0x0000000a)
-; GFX900: AMD 0x00008b06 Unknown note type: (0x0000000a)
+; GFX700: AMD 0x00008b0a Unknown note type: (0x0000000a)
+; GFX800: AMD 0x00008e6e Unknown note type: (0x0000000a)
+; GFX900: AMD 0x00008b0a Unknown note type: (0x0000000a)
; PARSER: AMDGPU Code Object Metadata Parser Test: PASS
diff --git a/test/CodeGen/AMDGPU/combine-and-sext-bool.ll b/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
new file mode 100644
index 0000000000000..cd4ac4d58ad3d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}and_i1_sext_bool:
+; GCN: v_cmp_{{gt|le}}_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e{{32|64}} [[VAL:v[0-9]+]], 0, v{{[0-9]+}}, [[CC]]
+; GCN: store_dword {{.*}}[[VAL]]
+; GCN-NOT: v_cndmask_b32_e64 v{{[0-9]+}}, {{0|-1}}, {{0|-1}}
+; GCN-NOT: v_and_b32_e32
+
+define amdgpu_kernel void @and_i1_sext_bool(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %and = and i32 %v, %ext
+ store i32 %and, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+declare i32 @llvm.amdgcn.workitem.id.y() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
index 187fb24dfb665..9e47c7d3449c8 100644
--- a/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
+++ b/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
@@ -150,6 +150,26 @@ bb:
ret void
}
+; GCN-LABEL: {{^}}add_and:
+; GCN: s_and_b64 [[CC:[^,]+]],
+; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+
+define amdgpu_kernel void @add_and(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp1 = icmp ugt i32 %x, %y
+ %cmp2 = icmp ugt i32 %x, 1
+ %cmp = and i1 %cmp1, %cmp2
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %v, %ext
+ store i32 %add, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
declare i1 @llvm.amdgcn.class.f32(float, i32) #0
declare i32 @llvm.amdgcn.workitem.id.x() #0
diff --git a/test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll b/test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll
new file mode 100644
index 0000000000000..3637722d004d3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}fold_mul_neg:
+; GCN: load_dword [[V:v[0-9]+]]
+; GCN: v_or_b32_e32 [[NEG:v[0-9]+]], 0x80000000, [[V]]
+; GCN: store_dword [[NEG]]
+
+define amdgpu_kernel void @fold_mul_neg(float addrspace(1)* %arg) {
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tid
+ %v = load float, float addrspace(1)* %gep, align 4
+ %cmp = fcmp fast ogt float %v, 0.000000e+00
+ %sel = select i1 %cmp, float -1.000000e+00, float 1.000000e+00
+ %mul = fmul fast float %v, %sel
+ store float %mul, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}fold_mul_abs:
+; GCN: load_dword [[V:v[0-9]+]]
+; GCN: v_and_b32_e32 [[ABS:v[0-9]+]], 0x7fffffff, [[V]]
+; GCN: store_dword [[ABS]]
+
+define amdgpu_kernel void @fold_mul_abs(float addrspace(1)* %arg) {
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tid
+ %v = load float, float addrspace(1)* %gep, align 4
+ %cmp = fcmp fast olt float %v, 0.000000e+00
+ %sel = select i1 %cmp, float -1.000000e+00, float 1.000000e+00
+ %mul = fmul fast float %v, %sel
+ store float %mul, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll b/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
index 51f564d969095..564d2b32964ff 100644
--- a/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
+++ b/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
@@ -14,24 +14,24 @@
; CHECK: s_movk_i32 [[K:s[0-9]+]], 0x4d2 ; encoding
; CHECK: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, [[K]] idxen offen offset:65535 glc slc
-define amdgpu_vs void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <16 x i8>] addrspace(2)* byval %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) {
+define amdgpu_vs void @main([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <4 x i32>] addrspace(2)* byval %arg3, [17 x <4 x i32>] addrspace(2)* inreg %arg4, [17 x <4 x i32>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) {
main_body:
- %tmp = getelementptr [2 x <16 x i8>], [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1
- %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
+ %tmp = getelementptr [2 x <4 x i32>], [2 x <4 x i32>] addrspace(2)* %arg3, i64 0, i32 1
+ %tmp10 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
%tmp11 = shl i32 %arg6, 2
- %tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
+ %tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
%tmp13 = bitcast i32 %tmp12 to float
- %tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
+ %tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 %tmp11, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
%tmp15 = bitcast i32 %tmp14 to float
- %tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
+ %tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 %tmp11, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
%tmp17 = bitcast i32 %tmp16 to float
- %tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp19 = bitcast i32 %tmp18 to float
- %tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 123, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 123, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp21 = bitcast i32 %tmp20 to float
- %tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 1234, i32 65535, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer, i32 1234, i32 65535, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp23 = bitcast i32 %tmp22 to float
call void @llvm.amdgcn.exp.f32(i32 15, i32 12, float %tmp13, float %tmp15, float %tmp17, float %tmp19, i1 false, i1 false)
@@ -40,10 +40,10 @@ main_body:
}
; Function Attrs: nounwind readonly
-declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #0
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
; Function Attrs: nounwind readonly
-declare i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32) #0
+declare i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
diff --git a/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll b/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
index cd9c082ed941a..01b76422c03f8 100644
--- a/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
+++ b/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
@@ -5,7 +5,7 @@
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offen offset:32 glc slc
define amdgpu_vs void @test1(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
@@ -15,7 +15,7 @@ define amdgpu_vs void @test1(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 idxen offset:32 glc slc
define amdgpu_vs void @test1_idx(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 0, i32 1, i32 1,
i32 1, i32 0)
ret void
@@ -25,7 +25,7 @@ define amdgpu_vs void @test1_idx(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, {{s[0-9]+}} idxen offset:32 glc slc
define amdgpu_vs void @test1_scalar_offset(i32 %a1, i32 %vaddr, i32 inreg %soffset) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 %soffset, i32 32, i32 14, i32 4, i32 0, i32 1, i32 1,
i32 1, i32 0)
ret void
@@ -35,7 +35,7 @@ define amdgpu_vs void @test1_scalar_offset(i32 %a1, i32 %vaddr, i32 inreg %soffs
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offen offset:32
define amdgpu_vs void @test1_no_glc_slc(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 0,
i32 0, i32 0)
ret void
@@ -45,7 +45,7 @@ define amdgpu_vs void @test1_no_glc_slc(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xyz {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:13, nfmt:4, 0 offen offset:24 glc slc
define amdgpu_vs void @test2(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 3, i32 %vaddr, i32 0, i32 24, i32 13, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
@@ -55,7 +55,7 @@ define amdgpu_vs void @test2(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xy {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:11, nfmt:4, 0 offen offset:16 glc slc
define amdgpu_vs void @test3(i32 %a1, i32 %vaddr) {
%vdata = insertelement <2 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v2i32(<16 x i8> undef, <2 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v2i32(<4 x i32> undef, <2 x i32> %vdata,
i32 2, i32 %vaddr, i32 0, i32 16, i32 11, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
@@ -64,12 +64,12 @@ define amdgpu_vs void @test3(i32 %a1, i32 %vaddr) {
;CHECK-LABEL: {{^}}test4:
;CHECK: tbuffer_store_format_x {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:4, nfmt:4, 0 offen offset:8 glc slc
define amdgpu_vs void @test4(i32 %vdata, i32 %vaddr) {
- call void @llvm.SI.tbuffer.store.i32(<16 x i8> undef, i32 %vdata,
+ call void @llvm.SI.tbuffer.store.i32(<4 x i32> undef, i32 %vdata,
i32 1, i32 %vaddr, i32 0, i32 8, i32 4, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
}
-declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
-declare void @llvm.SI.tbuffer.store.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
-declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+declare void @llvm.SI.tbuffer.store.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+declare void @llvm.SI.tbuffer.store.v2i32(<4 x i32>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+declare void @llvm.SI.tbuffer.store.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
diff --git a/test/CodeGen/AMDGPU/misched-killflags.mir b/test/CodeGen/AMDGPU/misched-killflags.mir
new file mode 100644
index 0000000000000..ac3a25e5e4b36
--- /dev/null
+++ b/test/CodeGen/AMDGPU/misched-killflags.mir
@@ -0,0 +1,45 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -run-pass=post-RA-sched -o - %s | FileCheck %s
+# Make sure ScheduleDAGInstrs::fixupKills does not produce invalid kill flags.
+---
+name: func0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3
+
+ %sgpr33 = S_MOV_B32 %sgpr7
+ %sgpr32 = S_MOV_B32 %sgpr33
+ %sgpr10 = S_MOV_B32 5
+ %sgpr9 = S_MOV_B32 4
+ %sgpr8 = S_MOV_B32 3
+ BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
+ %sgpr6_sgpr7 = S_GETPC_B64
+ %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
+ %sgpr7 = S_ADDC_U32 internal %sgpr7, 0, implicit-def %scc, implicit internal %scc
+ }
+ %sgpr4 = S_MOV_B32 %sgpr33
+ %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+ %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+ %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+ %vgpr3 = V_MOV_B32_e32 %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
+ S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+ S_ENDPGM
+...
+# CHECK-LABEL: name: func0
+# CHECK: %sgpr10 = S_MOV_B32 5
+# CHECK: %sgpr9 = S_MOV_B32 4
+# CHECK: %sgpr8 = S_MOV_B32 3
+# CHECK: %sgpr33 = S_MOV_B32 killed %sgpr7
+# CHECK: %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
+# CHECK: %sgpr6_sgpr7 = S_GETPC_B64
+# CHECK: %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
+# CHECK: %sgpr7 = S_ADDC_U32 internal %sgpr7, 0, implicit-def %scc, implicit internal %scc
+# CHECK: }
+# CHECK: %sgpr4 = S_MOV_B32 %sgpr33
+# CHECK: %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: %vgpr3 = V_MOV_B32_e32 killed %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
+# CHECK: %sgpr32 = S_MOV_B32 killed %sgpr33
+# CHECK: S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+# CHECK: S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/mubuf.ll b/test/CodeGen/AMDGPU/mubuf.ll
index d883b87ec401f..b23b21118aaa3 100644
--- a/test/CodeGen/AMDGPU/mubuf.ll
+++ b/test/CodeGen/AMDGPU/mubuf.ll
@@ -55,14 +55,14 @@ entry:
; CHECK-LABEL: {{^}}soffset_max_imm:
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc
-define amdgpu_gs void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
+define amdgpu_gs void @soffset_max_imm([6 x <4 x i32>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
- %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
- %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0
+ %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(2)* %0, i32 0, i32 0
+ %tmp1 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp0
%tmp2 = shl i32 %6, 2
- %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
%tmp4 = add i32 %6, 16
- %tmp1.4xi32 = bitcast <16 x i8> %tmp1 to <4 x i32>
+ %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 0, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i1 1, i1 1)
ret void
}
@@ -74,14 +74,14 @@ main_body:
; CHECK-LABEL: {{^}}soffset_no_fold:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x41
; CHECK: buffer_load_dword v{{[0-9+]}}, v{{[0-9+]}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc
-define amdgpu_gs void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
+define amdgpu_gs void @soffset_no_fold([6 x <4 x i32>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
- %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
- %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0
+ %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(2)* %0, i32 0, i32 0
+ %tmp1 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp0
%tmp2 = shl i32 %6, 2
- %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
%tmp4 = add i32 %6, 16
- %tmp1.4xi32 = bitcast <16 x i8> %tmp1 to <4 x i32>
+ %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 0, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i1 1, i1 1)
ret void
}
@@ -176,7 +176,7 @@ define amdgpu_kernel void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
ret void
}
-declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #0
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
attributes #0 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir b/test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir
deleted file mode 100644
index 31024277871d8..0000000000000
--- a/test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir
+++ /dev/null
@@ -1,69 +0,0 @@
-# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=simple-register-coalescing,rename-independent-subregs -o - %s | FileCheck -check-prefix=GCN %s
----
-
-# GCN-LABEL: name: mac_invalid_operands
-# GCN: undef %18.sub0 = V_MAC_F32_e32 undef %3, undef %9, undef %18.sub0, implicit %exec
-
-name: mac_invalid_operands
-alignment: 0
-exposesReturnsTwice: false
-legalized: false
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: vreg_128 }
- - { id: 1, class: vreg_128 }
- - { id: 2, class: sgpr_64 }
- - { id: 3, class: vgpr_32 }
- - { id: 4, class: vgpr_32 }
- - { id: 5, class: vgpr_32 }
- - { id: 6, class: vgpr_32 }
- - { id: 7, class: sreg_64 }
- - { id: 8, class: vgpr_32 }
- - { id: 9, class: vgpr_32 }
- - { id: 10, class: vreg_64 }
- - { id: 11, class: vreg_64 }
- - { id: 12, class: vreg_128 }
- - { id: 13, class: vreg_128 }
- - { id: 14, class: vgpr_32 }
- - { id: 15, class: vreg_64 }
- - { id: 16, class: vgpr_32 }
- - { id: 17, class: vreg_128 }
-body: |
- bb.0:
- successors: %bb.2, %bb.1
-
- %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, 0, implicit %exec
- %vcc = COPY killed %7
- S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
-
- bb.1:
- successors: %bb.3
-
- %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec
- undef %12.sub0 = COPY killed %4
- %17 = COPY killed %12
- S_BRANCH %bb.3
-
- bb.2:
- successors: %bb.3
-
- %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec
- undef %13.sub0 = COPY %8
- %13.sub1 = COPY %8
- %13.sub2 = COPY killed %8
- %0 = COPY killed %13
- %17 = COPY killed %0
-
- bb.3:
- %1 = COPY killed %17
- FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr
- %14 = COPY %1.sub1
- %16 = COPY killed %1.sub0
- undef %15.sub0 = COPY killed %16
- %15.sub1 = COPY killed %14
- FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr
- S_ENDPGM
-
-...
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir b/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
new file mode 100644
index 0000000000000..770bfaddb23e7
--- /dev/null
+++ b/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
@@ -0,0 +1,155 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=simple-register-coalescing,rename-independent-subregs -o - %s | FileCheck -check-prefix=GCN %s
+---
+
+# GCN-LABEL: name: mac_invalid_operands
+# GCN: undef %18.sub0 = V_MAC_F32_e32 undef %3, undef %9, undef %18.sub0, implicit %exec
+
+name: mac_invalid_operands
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_128 }
+ - { id: 1, class: vreg_128 }
+ - { id: 2, class: sgpr_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: vgpr_32 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: sreg_64 }
+ - { id: 8, class: vgpr_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vreg_64 }
+ - { id: 11, class: vreg_64 }
+ - { id: 12, class: vreg_128 }
+ - { id: 13, class: vreg_128 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vreg_64 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vreg_128 }
+body: |
+ bb.0:
+ successors: %bb.2, %bb.1
+
+ %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, 0, implicit %exec
+ %vcc = COPY killed %7
+ S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
+
+ bb.1:
+ successors: %bb.3
+
+ %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec
+ undef %12.sub0 = COPY killed %4
+ %17 = COPY killed %12
+ S_BRANCH %bb.3
+
+ bb.2:
+ successors: %bb.3
+
+ %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec
+ undef %13.sub0 = COPY %8
+ %13.sub1 = COPY %8
+ %13.sub2 = COPY killed %8
+ %0 = COPY killed %13
+ %17 = COPY killed %0
+
+ bb.3:
+ %1 = COPY killed %17
+ FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr
+ %14 = COPY %1.sub1
+ %16 = COPY killed %1.sub0
+ undef %15.sub0 = COPY killed %16
+ %15.sub1 = COPY killed %14
+ FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+ S_ENDPGM
+
+...
+---
+# Make sure other uses after the mac are properly handled and not
+# left unreplaced due to iterator issues from substituteRegister.
+
+# GCN-LABEL: name: vreg_does_not_dominate
+
+# GCN: undef %8.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %8.sub1, implicit %exec
+# GCN: undef %7.sub0 = V_MOV_B32_e32 0, implicit %exec
+# GCN: undef %9.sub2 = COPY %7.sub0
+
+# GCN: undef %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec
+# GCN: undef %7.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
+# GCN: %8.sub1 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit %exec
+
+# GCN: BUFFER_STORE_DWORD_OFFEN %6.sub3, %0,
+# GCN: BUFFER_STORE_DWORD_OFFEN %9.sub2, %0,
+# GCN: BUFFER_STORE_DWORD_OFFEN %8.sub1, %0,
+# GCN: BUFFER_STORE_DWORD_OFFEN %7.sub0, %0,
+name: vreg_does_not_dominate
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32, preferred-register: '' }
+ - { id: 1, class: vgpr_32, preferred-register: '' }
+ - { id: 2, class: vgpr_32, preferred-register: '' }
+ - { id: 3, class: vgpr_32, preferred-register: '' }
+ - { id: 4, class: vgpr_32, preferred-register: '' }
+ - { id: 5, class: sreg_64, preferred-register: '' }
+ - { id: 6, class: vreg_128, preferred-register: '' }
+liveins:
+ - { reg: '%vgpr0', virtual-reg: '%0' }
+ - { reg: '%sgpr30_sgpr31', virtual-reg: '%5' }
+body: |
+ bb.0:
+ successors: %bb.2, %bb.1
+ liveins: %vgpr0, %sgpr30_sgpr31, %sgpr5
+
+ %5 = COPY %sgpr30_sgpr31
+ %0 = COPY %vgpr0
+ undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit %exec
+ %6.sub0 = V_MOV_B32_e32 0, implicit %exec
+ %6.sub2 = COPY %6.sub0
+ S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2
+
+ %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec
+ %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
+ %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit %exec
+ %6.sub2 = COPY %6.sub0
+
+ bb.2:
+ BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 12, 0, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 8, 0, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 4, 0, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+ %sgpr30_sgpr31 = COPY %5
+ %sgpr5 = COPY %sgpr5
+ S_SETPC_B64_return %sgpr30_sgpr31, implicit %sgpr5
+
+...
+
+# GCN-LABEL: name: inf_loop_tied_operand
+# GCN: bb.0:
+# GCN-NEXT: undef %2.sub0 = V_MAC_F32_e32 1073741824, undef %0, undef %2.sub0, implicit %exec
+# GCN-NEXT: dead undef %3.sub1 = COPY %2.sub0
+
+name: inf_loop_tied_operand
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32, preferred-register: '' }
+ - { id: 1, class: vgpr_32, preferred-register: '' }
+ - { id: 2, class: vreg_128, preferred-register: '' }
+body: |
+ bb.0:
+ %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit %exec
+ undef %2.sub0 = COPY %1
+ %2.sub1 = COPY %1
+
+...
diff --git a/test/CodeGen/AMDGPU/ret_jump.ll b/test/CodeGen/AMDGPU/ret_jump.ll
index e7a05d94cdc43..1acae60f30579 100644
--- a/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/test/CodeGen/AMDGPU/ret_jump.ll
@@ -23,7 +23,7 @@
; GCN-NEXT: [[RET_BB]]:
; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end0
-define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
+define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
entry:
%i.i = extractelement <2 x i32> %arg7, i32 0
%j.i = extractelement <2 x i32> %arg7, i32 1
@@ -75,7 +75,7 @@ ret.bb: ; preds = %else, %main_body
; GCN-NEXT: s_waitcnt
; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end
-define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
+define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
main_body:
%i.i = extractelement <2 x i32> %arg7, i32 0
%j.i = extractelement <2 x i32> %arg7, i32 1
@@ -119,9 +119,6 @@ declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-
-; Function Attrs: nounwind readnone
declare float @llvm.fabs.f32(float) #1
; Function Attrs: nounwind readnone
diff --git a/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll b/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll
index 47e32724d9ca2..5edc2c5c9b713 100644
--- a/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll
+++ b/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll
@@ -15,16 +15,16 @@ target triple = "amdgcn--"
define amdgpu_gs void @main(i32 inreg %arg) #0 {
main_body:
- %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 20)
- %tmp1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 24)
- %tmp2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 48)
+ %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 20)
+ %tmp1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 24)
+ %tmp2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 48)
%array_vector3 = insertelement <4 x float> zeroinitializer, float %tmp2, i32 3
%array_vector5 = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %tmp, i32 1
%array_vector6 = insertelement <4 x float> %array_vector5, float undef, i32 2
%array_vector9 = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %tmp1, i32 1
%array_vector10 = insertelement <4 x float> %array_vector9, float 0.000000e+00, i32 2
%array_vector11 = insertelement <4 x float> %array_vector10, float undef, i32 3
- %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> undef, i32 undef, i32 4864, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> undef, i32 undef, i32 4864, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> undef, i32 0, i32 0, i32 %arg, i32 36, i32 4, i32 4, i1 1, i1 1)
%bc = bitcast <4 x float> %array_vector3 to <4 x i32>
%tmp4 = extractelement <4 x i32> %bc, i32 undef
@@ -45,8 +45,8 @@ main_body:
ret void
}
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #2
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #2
declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1) #3
attributes #0 = { nounwind "target-cpu"="tonga" }
diff --git a/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
new file mode 100644
index 0000000000000..4f5c582f8b583
--- /dev/null
+++ b/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -0,0 +1,446 @@
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=si-peephole-sdwa -verify-machineinstrs -o - %s | FileCheck -check-prefix=VI -check-prefix=GFX89 -check-prefix=GCN %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass=si-peephole-sdwa -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=GCN %s
+
+# GFX89-LABEL: {{^}}name: vop1_instructions
+
+# GFX89: %{{[0-9]+}} = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+
+
+# GFX89: %{{[0-9]+}} = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+
+
+# VI: %{{[0-9]+}} = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit %exec
+
+
+---
+name: vop1_instructions
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vreg_64 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0 }
+ - { id: 7, class: sreg_32_xm0 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: vgpr_32 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: vgpr_32 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vgpr_32 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vgpr_32 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vgpr_32 }
+ - { id: 29, class: vgpr_32 }
+ - { id: 30, class: vgpr_32 }
+ - { id: 31, class: vgpr_32 }
+ - { id: 32, class: vgpr_32 }
+ - { id: 33, class: vgpr_32 }
+ - { id: 34, class: vgpr_32 }
+ - { id: 35, class: vgpr_32 }
+ - { id: 36, class: vgpr_32 }
+ - { id: 37, class: vgpr_32 }
+ - { id: 38, class: vgpr_32 }
+ - { id: 39, class: vgpr_32 }
+ - { id: 40, class: vgpr_32 }
+ - { id: 41, class: vgpr_32 }
+ - { id: 42, class: vgpr_32 }
+ - { id: 43, class: vgpr_32 }
+ - { id: 44, class: vgpr_32 }
+ - { id: 45, class: vgpr_32 }
+ - { id: 46, class: vgpr_32 }
+ - { id: 47, class: vgpr_32 }
+ - { id: 48, class: vgpr_32 }
+ - { id: 100, class: vgpr_32 }
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+
+ %2 = COPY %sgpr30_sgpr31
+ %1 = COPY %vgpr2_vgpr3
+ %0 = COPY %vgpr0_vgpr1
+ %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+
+ %5 = S_MOV_B32 65535
+ %6 = S_MOV_B32 65535
+
+ %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
+ %11 = V_MOV_B32_e32 %10, implicit %exec
+ %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
+ %14 = V_FRACT_F32_e32 123, implicit %exec
+ %15 = V_LSHLREV_B32_e64 16, %14, implicit %exec
+ %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
+ %17 = V_SIN_F32_e32 %16, implicit %exec
+ %18 = V_LSHLREV_B32_e64 16, %17, implicit %exec
+ %19 = V_LSHRREV_B32_e64 16, %18, implicit %exec
+ %20 = V_CVT_U32_F32_e32 %19, implicit %exec
+ %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
+ %23 = V_CVT_F32_I32_e32 123, implicit %exec
+ %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
+
+ %25 = V_LSHRREV_B32_e64 16, %3, implicit %exec
+ %26 = V_MOV_B32_e64 %25, implicit %exec
+ %26 = V_LSHLREV_B32_e64 16, %26, implicit %exec
+ %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit %exec
+ %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
+ %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
+ %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit %exec
+ %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
+ %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
+ %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit %exec
+ %34 = V_LSHLREV_B32_e64 16, %33, implicit %exec
+ %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit %exec
+ %36 = V_LSHLREV_B32_e64 16, %35, implicit %exec
+
+
+ %37 = V_LSHRREV_B32_e64 16, %36, implicit %exec
+ %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit %exec
+ %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
+ %40 = V_LSHRREV_B32_e64 16, %39, implicit %exec
+ %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit %exec
+ %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
+ %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
+ %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit %exec
+ %45 = V_LSHLREV_B32_e64 16, %44, implicit %exec
+ %46 = V_LSHRREV_B32_e64 16, %45, implicit %exec
+ %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit %exec
+ %48 = V_LSHLREV_B32_e64 16, %47, implicit %exec
+
+
+ %100 = V_MOV_B32_e32 %48, implicit %exec
+
+ FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+ %sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return %sgpr30_sgpr31
+
+...
+---
+# GCN-LABEL: {{^}}name: vop2_instructions
+
+
+# VI: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
+
+
+# VI: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
+
+
+# VI: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+
+name: vop2_instructions
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vreg_64 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0 }
+ - { id: 7, class: sreg_32_xm0 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: vgpr_32 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: vgpr_32 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vgpr_32 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vgpr_32 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vgpr_32 }
+ - { id: 29, class: vgpr_32 }
+ - { id: 30, class: vgpr_32 }
+ - { id: 31, class: vgpr_32 }
+ - { id: 32, class: vgpr_32 }
+ - { id: 33, class: vgpr_32 }
+ - { id: 34, class: vgpr_32 }
+ - { id: 35, class: vgpr_32 }
+ - { id: 36, class: vgpr_32 }
+ - { id: 37, class: vgpr_32 }
+ - { id: 38, class: vgpr_32 }
+ - { id: 39, class: vgpr_32 }
+ - { id: 40, class: vgpr_32 }
+ - { id: 41, class: vgpr_32 }
+ - { id: 42, class: vgpr_32 }
+ - { id: 43, class: vgpr_32 }
+ - { id: 44, class: vgpr_32 }
+ - { id: 45, class: vgpr_32 }
+ - { id: 46, class: vgpr_32 }
+ - { id: 47, class: vgpr_32 }
+ - { id: 48, class: vgpr_32 }
+ - { id: 49, class: vgpr_32 }
+ - { id: 50, class: vgpr_32 }
+ - { id: 51, class: vgpr_32 }
+ - { id: 52, class: vgpr_32 }
+ - { id: 53, class: vgpr_32 }
+ - { id: 54, class: vgpr_32 }
+ - { id: 55, class: vgpr_32 }
+ - { id: 56, class: vgpr_32 }
+ - { id: 57, class: vgpr_32 }
+ - { id: 58, class: vgpr_32 }
+ - { id: 59, class: vgpr_32 }
+ - { id: 60, class: vgpr_32 }
+ - { id: 100, class: vgpr_32 }
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+
+ %2 = COPY %sgpr30_sgpr31
+ %1 = COPY %vgpr2_vgpr3
+ %0 = COPY %vgpr0_vgpr1
+ %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+
+ %5 = S_MOV_B32 65535
+ %6 = S_MOV_B32 65535
+
+ %11 = V_LSHRREV_B32_e64 16, %3, implicit %exec
+ %12 = V_AND_B32_e32 %6, %11, implicit %exec
+ %13 = V_LSHLREV_B32_e64 16, %12, implicit %exec
+ %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
+ %15 = V_BFE_U32 %13, 8, 8, implicit %exec
+ %16 = V_ADD_F32_e32 %14, %15, implicit %exec
+ %17 = V_LSHLREV_B32_e64 16, %16, implicit %exec
+ %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
+ %19 = V_BFE_U32 %17, 8, 8, implicit %exec
+ %20 = V_SUB_F16_e32 %18, %19, implicit %exec
+ %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
+ %22 = V_BFE_U32 %20, 8, 8, implicit %exec
+ %23 = V_MAC_F32_e32 %21, %22, %22, implicit %exec
+ %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
+ %25 = V_LSHRREV_B32_e64 16, %24, implicit %exec
+ %26 = V_BFE_U32 %24, 8, 8, implicit %exec
+ %27 = V_MAC_F16_e32 %25, %26, %26, implicit %exec
+ %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
+
+ %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
+ %30 = V_AND_B32_e64 23, %29, implicit %exec
+ %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
+ %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
+ %33 = V_BFE_U32 %31, 8, 8, implicit %exec
+ %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit %exec
+ %35 = V_LSHLREV_B32_e64 16, %34, implicit %exec
+ %37 = V_BFE_U32 %35, 8, 8, implicit %exec
+ %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit %exec
+ %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
+ %40 = V_BFE_U32 %39, 8, 8, implicit %exec
+ %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit %exec
+ %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
+ %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
+ %44 = V_BFE_U32 %42, 8, 8, implicit %exec
+ %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit %exec
+ %46 = V_LSHLREV_B32_e64 16, %45, implicit %exec
+
+ %47 = V_LSHRREV_B32_e64 16, %46, implicit %exec
+ %48 = V_BFE_U32 %46, 8, 8, implicit %exec
+ %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit %exec
+ %50 = V_LSHLREV_B32_e64 16, %49, implicit %exec
+ %51 = V_BFE_U32 %50, 8, 8, implicit %exec
+ %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit %exec
+ %53 = V_LSHLREV_B32_e64 16, %52, implicit %exec
+ %54 = V_BFE_U32 %53, 8, 8, implicit %exec
+ %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit %exec
+ %56 = V_LSHLREV_B32_e64 16, %55, implicit %exec
+ %57 = V_LSHRREV_B32_e64 16, %56, implicit %exec
+ %58 = V_BFE_U32 %56, 8, 8, implicit %exec
+ %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit %exec
+ %60 = V_LSHLREV_B32_e64 16, %59, implicit %exec
+
+ %100 = V_MOV_B32_e32 %60, implicit %exec
+
+ FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+ %sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return %sgpr30_sgpr31
+
+...
+---
+
+# GCN-LABEL: {{^}}name: vopc_instructions
+
+# GFX89: %{{[0-9]+}} = V_MOV_B32_e32 123, implicit %exec
+# GFX89: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX89: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX89: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX89: %vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+
+
+# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# VI: %{{[0-9]+}} = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 0, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def %vcc, implicit %exec
+# VI: %{{[0-9]+}} = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def %exec, implicit %exec
+
+# GFX9: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MOV_B32_e32 23, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MOV_B32_e32 23, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+
+
+# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 2, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 2, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, 2, implicit-def %exec, implicit %exec
+
+# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 0, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 2, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 2, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, 2, implicit-def %exec, implicit %exec
+
+
+name: vopc_instructions
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vreg_64 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0 }
+ - { id: 7, class: sreg_32_xm0 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: vgpr_32 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: vgpr_32 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: sreg_64 }
+ - { id: 19, class: sreg_64 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vgpr_32 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 100, class: vgpr_32 }
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+
+ %2 = COPY %sgpr30_sgpr31
+ %1 = COPY %vgpr2_vgpr3
+ %0 = COPY %vgpr0_vgpr1
+ %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+
+ %5 = S_MOV_B32 65535
+ %6 = S_MOV_B32 65535
+
+ %10 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMP_EQ_F32_e32 123, killed %10, implicit-def %vcc, implicit %exec
+ %11 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMPX_GT_F32_e32 123, killed %11, implicit-def %vcc, implicit-def %exec, implicit %exec
+ %12 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMP_LT_I32_e32 123, killed %12, implicit-def %vcc, implicit %exec
+ %13 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMPX_EQ_I32_e32 123, killed %13, implicit-def %vcc, implicit-def %exec, implicit %exec
+
+ %14 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, 0, implicit %exec
+ %15 = V_AND_B32_e64 %5, %3, implicit %exec
+ %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, 0, implicit-def %exec, implicit %exec
+ %16 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit %exec
+ %17 = V_AND_B32_e64 %5, %3, implicit %exec
+ %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def %exec, implicit %exec
+
+ %20 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, 0, implicit %exec
+ %21 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, 2, implicit-def %exec, implicit %exec
+ %23 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, 2, implicit %exec
+ %24 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, 0, implicit-def %exec, implicit %exec
+ %25 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, 0, implicit-def %exec, implicit %exec
+ %26 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, 0, implicit-def %exec, implicit %exec
+ %27 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, 2, implicit-def %exec, implicit %exec
+
+
+ %100 = V_MOV_B32_e32 %vcc_lo, implicit %exec
+
+ FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+ %sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return %sgpr30_sgpr31
diff --git a/test/CodeGen/AMDGPU/select-vectors.ll b/test/CodeGen/AMDGPU/select-vectors.ll
index 4b00a48211ecf..ebbc675b2babe 100644
--- a/test/CodeGen/AMDGPU/select-vectors.ll
+++ b/test/CodeGen/AMDGPU/select-vectors.ll
@@ -66,7 +66,7 @@ define amdgpu_kernel void @v_select_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8
}
; GCN-LABEL: {{^}}select_v4i8:
-; GCN: v_cndmask_b32_e32
+; GCN: v_cndmask_b32
; GCN-NOT: cndmask
define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) #0 {
%cmp = icmp eq i8 %c, 0
diff --git a/test/CodeGen/AMDGPU/setcc-sext.ll b/test/CodeGen/AMDGPU/setcc-sext.ll
new file mode 100644
index 0000000000000..eadce225e3502
--- /dev/null
+++ b/test/CodeGen/AMDGPU/setcc-sext.ll
@@ -0,0 +1,292 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}setcc_sgt_true_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sgt_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp sgt i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_sgt_true_sext_swap:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sgt_true_sext_swap(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp slt i32 -1, %ext
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ne_true_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ne_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ne i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ult_true_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ult_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ult i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_eq_true_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_eq_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp eq i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_sle_true_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sle_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp sle i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_uge_true_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_uge_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp uge i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_eq_false_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_eq_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp eq i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_sge_false_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sge_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp sge i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ule_false_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ule_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ule i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ne_false_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ne_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ne i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ugt_false_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ugt_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ugt i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_slt_false_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_slt_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp slt i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+declare i32 @llvm.amdgcn.workitem.id.y() #0
+
+attributes #0 = { nounwind readnone speculatable }
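All of the tests above exercise the same combine: an icmp of the sign-extended i1 against -1 or 0 folds back to the original condition (or its inverse), so the extension never has to be materialized with v_cndmask_b32. A minimal sketch of the identity being relied on, as an illustration rather than part of the patch:

define i1 @sext_setcc_fold_sketch(i32 %x, i32 %y) {
  %cmp = icmp ugt i32 %x, %y    ; i1 condition
  %ext = sext i1 %cmp to i32    ; -1 when true, 0 when false
  %cond = icmp eq i32 %ext, -1  ; folds to %cmp; 'icmp ne %ext, 0' does too
  ret i1 %cond
}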
diff --git a/test/CodeGen/AMDGPU/sgpr-copy.ll b/test/CodeGen/AMDGPU/sgpr-copy.ll
index 5c20e9a8d5859..931051102cd5c 100644
--- a/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -4,13 +4,13 @@
; CHECK-LABEL: {{^}}phi1:
; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
; CHECK: v_mov_b32_e32 v{{[0-9]}}, [[DST]]
-define amdgpu_ps void @phi1(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @phi1(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 0)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 0)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 32)
%tmp24 = fptosi float %tmp22 to i32
%tmp25 = icmp ne i32 %tmp24, 0
br i1 %tmp25, label %ENDIF, label %ELSE
@@ -28,29 +28,29 @@ ENDIF: ; preds = %ELSE, %main_body
; Make sure this program doesn't crash
; CHECK-LABEL: {{^}}phi2:
-define amdgpu_ps void @phi2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
+define amdgpu_ps void @phi2(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 36)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 40)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 48)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 52)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 56)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 64)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 68)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 72)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 76)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 80)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 84)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 88)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 92)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 32)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 36)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 40)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 48)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 52)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 56)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 64)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 68)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 72)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 76)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 80)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 84)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 88)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 92)
%tmp36 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %arg2, i32 0
%tmp37 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp36, !tbaa !0
- %tmp38 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg1, i32 0
- %tmp39 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp38, !tbaa !0
+ %tmp38 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg1, i32 0
+ %tmp39 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp38, !tbaa !0
%i.i = extractelement <2 x i32> %arg5, i32 0
%j.i = extractelement <2 x i32> %arg5, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -85,7 +85,7 @@ main_body:
%tmp46 = bitcast float %p2.i24 to i32
%tmp47 = insertelement <2 x i32> undef, i32 %tmp45, i32 0
%tmp48 = insertelement <2 x i32> %tmp47, i32 %tmp46, i32 1
- %tmp39.bc = bitcast <16 x i8> %tmp39 to <4 x i32>
+ %tmp39.bc = bitcast <4 x i32> %tmp39 to <4 x i32>
%a.bc.i = bitcast <2 x i32> %tmp48 to <2 x float>
%tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp37, <4 x i32> %tmp39.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp50 = extractelement <4 x float> %tmp1, i32 2
@@ -173,14 +173,14 @@ ENDIF24: ; preds = %IF25, %ENDIF
 ; We just want to make sure the program doesn't crash
; CHECK-LABEL: {{^}}loop:
-define amdgpu_ps void @loop(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @loop(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 0)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 4)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 8)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 12)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 0)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 4)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 8)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 12)
%tmp25 = fptosi float %tmp24 to i32
%tmp26 = bitcast i32 %tmp25 to float
%tmp27 = bitcast float %tmp26 to i32
@@ -226,17 +226,17 @@ ENDIF: ; preds = %LOOP
; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[SAMPLE_LO]]:[[SAMPLE_HI]]{{\]}}
; CHECK: exp
; CHECK: s_endpgm
-define amdgpu_ps void @sample_v3([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @sample_v3([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
entry:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 16)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 16)
%tmp23 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp24 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp23, !tbaa !0
- %tmp25 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp26 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp25, !tbaa !0
+ %tmp25 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp26 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp25, !tbaa !0
%tmp27 = fcmp oeq float %tmp22, 0.000000e+00
- %tmp26.bc = bitcast <16 x i8> %tmp26 to <4 x i32>
+ %tmp26.bc = bitcast <4 x i32> %tmp26 to <4 x i32>
br i1 %tmp27, label %if, label %else
if: ; preds = %entry
@@ -290,7 +290,7 @@ endif: ; preds = %if1, %if0, %entry
 ; This test is just checking that we don't crash or hit an assertion failure.
; CHECK-LABEL: {{^}}copy2:
; CHECK: s_endpgm
-define amdgpu_ps void @copy2([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @copy2([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
entry:
br label %LOOP68
@@ -326,11 +326,11 @@ ENDIF69: ; preds = %LOOP68
; [[END]]:
; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[ADD]]{{\]}}
; CHECK: s_endpgm
-define amdgpu_ps void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
+define amdgpu_ps void @sample_rsrc([6 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
bb:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0
- %tmp22 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !3
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp22, i32 16)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg1, i32 0, i32 0
+ %tmp22 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !3
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp22, i32 16)
%tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(2)* %arg3, i32 0, i32 0
%tmp26 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp25, !tbaa !3
%tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg2, i32 0, i32 0
@@ -420,7 +420,7 @@ declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1)
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
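The mechanical change throughout this file is that buffer descriptors are now typed <4 x i32> and llvm.SI.load.const is mangled on that operand. A self-contained usage sketch built only from the declaration updated above (the function name is illustrative):

declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32)

define amdgpu_ps float @load_const_sketch(<4 x i32> inreg %rsrc) {
  %v = call float @llvm.SI.load.const.v4i32(<4 x i32> %rsrc, i32 0)
  ret float %v
}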
diff --git a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
index a6026785b1739..c70eb9b9c4a53 100644
--- a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
@@ -151,10 +151,11 @@ define amdgpu_kernel void @v_uextract_bit_1_31_i64(i64 addrspace(1)* %out, i64 a
ret void
}
-; Spans the dword boundary, so requires full shift
+; Spans the dword boundary, so requires full shift.
+; Truncated after the shift, so only the low half of the shift result is used.
; GCN-LABEL: {{^}}v_uextract_bit_31_32_i64:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 31
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 3, v[[SHRLO]]{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO]]{{\]}}
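The updated checks encode that a 64-bit right shift whose result is only used truncated is just a funnel shift of the two 32-bit halves, which v_alignbit_b32 performs in one instruction. Expressed with the generic funnel-shift intrinsic purely for illustration (the test itself does not use it):

declare i32 @llvm.fshr.i32(i32, i32, i32)

; trunc(({hi,lo} as i64) >> 31) == fshr(hi, lo, 31)
define i32 @alignbit_sketch(i32 %lo, i32 %hi) {
  %r = call i32 @llvm.fshr.i32(i32 %hi, i32 %lo, i32 31)
  ret i32 %r
}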
@@ -188,8 +189,8 @@ define amdgpu_kernel void @v_uextract_bit_32_33_i64(i64 addrspace(1)* %out, i64
; GCN-LABEL: {{^}}v_uextract_bit_30_60_i64:
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 30
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 30
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 0x3fffffff, v[[SHRLO]]{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO1]]{{\]}}
@@ -223,10 +224,9 @@ define amdgpu_kernel void @v_uextract_bit_33_63_i64(i64 addrspace(1)* %out, i64
; GCN-LABEL: {{^}}v_uextract_bit_31_63_i64:
; GCN: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
-; GCN-NEXT: v_mov_b32_e32 v[[SHRHI]], v[[ZERO]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[SHRHI]]{{\]}}
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 31
+; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[ZERO]]{{\]}}
define amdgpu_kernel void @v_uextract_bit_31_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
@@ -288,8 +288,8 @@ define amdgpu_kernel void @v_uextract_bit_33_i64_trunc_i32(i32 addrspace(1)* %ou
}
; GCN-LABEL: {{^}}v_uextract_bit_31_32_i64_trunc_i32:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 31
; GCN-NEXT: v_and_b32_e32 v[[SHRLO]], 3, v[[SHRLO]]
; GCN-NOT: v[[SHRLO]]
; GCN: buffer_store_dword v[[SHRLO]]
diff --git a/test/CodeGen/AMDGPU/shift-i64-opts.ll b/test/CodeGen/AMDGPU/shift-i64-opts.ll
index a803849be02c4..5306e190a4f9c 100644
--- a/test/CodeGen/AMDGPU/shift-i64-opts.ll
+++ b/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -243,3 +243,77 @@ define amdgpu_kernel void @trunc_shl_31_i32_i64_multi_use(i32 addrspace(1)* %out
store volatile i64 %shl, i64 addrspace(1)* %in
ret void
}
+
+; GCN-LABEL: {{^}}trunc_shl_and31:
+; GCN: s_and_b32 s[[AMT:[0-9]+]], s{{[0-9]+}}, 31
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, s[[AMT]], v{{[0-9]+}}
+; GCN-NOT: v_lshl_b64
+; GCN-NOT: v_lshlrev_b64
+define amdgpu_kernel void @trunc_shl_and31(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 31
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
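The reason the and enables the fold: once the amount is masked below 32, only the low 32 bits of the source can reach the truncated result, so the 64-bit shift collapses to a 32-bit one. A sketch of the identity (illustration only, not from the patch):

define i32 @trunc_shl_sketch(i64 %x, i32 %amt) {
  %a = and i32 %amt, 31   ; amount provably < 32
  %lo = trunc i64 %x to i32
  %shl = shl i32 %lo, %a  ; equals trunc(shl i64 %x, zext(%a)) to i32
  ret i32 %shl
}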
+
+; GCN-LABEL: {{^}}trunc_shl_and30:
+; GCN: s_and_b32 s[[AMT:[0-9]+]], s{{[0-9]+}}, 30
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, s[[AMT]], v{{[0-9]+}}
+; GCN-NOT: v_lshl_b64
+; GCN-NOT: v_lshlrev_b64
+define amdgpu_kernel void @trunc_shl_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 30
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_wrong_and63:
+; Negative test, wrong constant: and with 63 does not bound the amount below 32
+; GCN: v_lshl_b64
+define amdgpu_kernel void @trunc_shl_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 63
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_no_and:
+; Negative test, the unmasked amount may be >= 32, so the full 64-bit shift is kept
+; GCN: v_lshl_b64
+define amdgpu_kernel void @trunc_shl_no_and(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp4 = zext i32 %arg2 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_vec_vec:
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 6, v{{[0-9]+}}
+; GCN-NOT: v_lshl_b64
+; GCN-NOT: v_lshlrev_b64
+define amdgpu_kernel void @trunc_shl_vec_vec(<4 x i64> addrspace(1)* %arg) {
+bb:
+ %v = load <4 x i64>, <4 x i64> addrspace(1)* %arg, align 32
+ %shl = shl <4 x i64> %v, <i64 3, i64 4, i64 5, i64 6>
+ store <4 x i64> %shl, <4 x i64> addrspace(1)* %arg, align 32
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/si-lod-bias.ll b/test/CodeGen/AMDGPU/si-lod-bias.ll
index 3a7359ea4ffaf..4224980665097 100644
--- a/test/CodeGen/AMDGPU/si-lod-bias.ll
+++ b/test/CodeGen/AMDGPU/si-lod-bias.ll
@@ -6,15 +6,15 @@
; GCN-LABEL: {{^}}main:
; GCN: image_sample_b v{{\[[0-9]:[0-9]\]}}, v{{\[[0-9]:[0-9]\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0xf
-define amdgpu_ps void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @main(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
%tmp22 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %arg2, i32 0
%tmp23 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp22, !tbaa !0
- %tmp24 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg1, i32 0
- %tmp25 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp24, !tbaa !0
+ %tmp24 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg1, i32 0
+ %tmp25 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp24, !tbaa !0
%i.i = extractelement <2 x i32> %arg5, i32 0
%j.i = extractelement <2 x i32> %arg5, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -34,9 +34,8 @@ main_body:
%tmp32 = insertelement <4 x i32> %tmp31, i32 %tmp29, i32 1
%tmp33 = insertelement <4 x i32> %tmp32, i32 %tmp30, i32 2
%tmp34 = insertelement <4 x i32> %tmp33, i32 undef, i32 3
- %tmp25.bc = bitcast <16 x i8> %tmp25 to <4 x i32>
%tmp34.bc = bitcast <4 x i32> %tmp34 to <4 x float>
- %tmp35 = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> %tmp34.bc, <8 x i32> %tmp23, <4 x i32> %tmp25.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp35 = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> %tmp34.bc, <8 x i32> %tmp23, <4 x i32> %tmp25, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp36 = extractelement <4 x float> %tmp35, i32 0
%tmp37 = extractelement <4 x float> %tmp35, i32 1
%tmp38 = extractelement <4 x float> %tmp35, i32 2
@@ -49,7 +48,7 @@ declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/si-sgpr-spill.ll b/test/CodeGen/AMDGPU/si-sgpr-spill.ll
index 8731e74d63a05..3e70f2c778260 100644
--- a/test/CodeGen/AMDGPU/si-sgpr-spill.ll
+++ b/test/CodeGen/AMDGPU/si-sgpr-spill.ll
@@ -24,81 +24,81 @@
; GCN: s_endpgm
; TOVGPR: ScratchSize: 0{{$}}
-define amdgpu_ps void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
+define amdgpu_ps void @main([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
main_body:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 96)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 100)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 104)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 112)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 116)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 120)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 140)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 144)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 160)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 176)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 180)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 184)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 192)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 196)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 200)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 208)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 212)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 216)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 224)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 240)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 244)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 248)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 256)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 272)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 276)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 280)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 288)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 292)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 296)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 304)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 308)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 312)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 368)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 372)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 376)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 384)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 96)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 100)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 104)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 112)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 116)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 120)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 128)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 132)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 140)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 144)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 160)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 176)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 180)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 184)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 192)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 196)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 200)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 208)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 212)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 216)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 224)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 240)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 244)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 248)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 256)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 272)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 276)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 280)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 288)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 292)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 296)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 304)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 308)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 312)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 368)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 372)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 376)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 384)
%tmp60 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp61 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp60, !tbaa !0
- %tmp62 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp63 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp62, !tbaa !0
- %tmp63.bc = bitcast <16 x i8> %tmp63 to <4 x i32>
+ %tmp62 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp63 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp62, !tbaa !0
+ %tmp63.bc = bitcast <4 x i32> %tmp63 to <4 x i32>
%tmp64 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 1
%tmp65 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp64, !tbaa !0
- %tmp66 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 1
- %tmp67 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp66, !tbaa !0
+ %tmp66 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 1
+ %tmp67 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp66, !tbaa !0
%tmp68 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 2
%tmp69 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp68, !tbaa !0
- %tmp70 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 2
- %tmp71 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp70, !tbaa !0
+ %tmp70 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 2
+ %tmp71 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp70, !tbaa !0
%tmp72 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 3
%tmp73 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp72, !tbaa !0
- %tmp74 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 3
- %tmp75 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp74, !tbaa !0
+ %tmp74 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 3
+ %tmp75 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp74, !tbaa !0
%tmp76 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 4
%tmp77 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp76, !tbaa !0
- %tmp78 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 4
- %tmp79 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp78, !tbaa !0
+ %tmp78 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 4
+ %tmp79 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp78, !tbaa !0
%tmp80 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 5
%tmp81 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp80, !tbaa !0
- %tmp82 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 5
- %tmp83 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp82, !tbaa !0
+ %tmp82 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 5
+ %tmp83 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp82, !tbaa !0
%tmp84 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 6
%tmp85 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp84, !tbaa !0
- %tmp86 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 6
- %tmp87 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp86, !tbaa !0
+ %tmp86 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 6
+ %tmp87 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp86, !tbaa !0
%tmp88 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 7
%tmp89 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp88, !tbaa !0
- %tmp90 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
- %tmp91 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp90, !tbaa !0
+ %tmp90 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 7
+ %tmp91 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp90, !tbaa !0
%i.i = extractelement <2 x i32> %arg6, i32 0
%j.i = extractelement <2 x i32> %arg6, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -410,7 +410,7 @@ IF67: ; preds = %LOOP65
%tmp274 = insertelement <8 x i32> %tmp273, i32 %tmp268, i32 5
%tmp275 = insertelement <8 x i32> %tmp274, i32 undef, i32 6
%tmp276 = insertelement <8 x i32> %tmp275, i32 undef, i32 7
- %tmp67.bc = bitcast <16 x i8> %tmp67 to <4 x i32>
+ %tmp67.bc = bitcast <4 x i32> %tmp67 to <4 x i32>
%tmp276.bc = bitcast <8 x i32> %tmp276 to <8 x float>
%tmp277 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp276.bc, <8 x i32> %tmp65, <4 x i32> %tmp67.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp278 = extractelement <4 x float> %tmp277, i32 0
@@ -432,7 +432,7 @@ IF67: ; preds = %LOOP65
%tmp294 = insertelement <8 x i32> %tmp293, i32 %tmp288, i32 5
%tmp295 = insertelement <8 x i32> %tmp294, i32 undef, i32 6
%tmp296 = insertelement <8 x i32> %tmp295, i32 undef, i32 7
- %tmp83.bc = bitcast <16 x i8> %tmp83 to <4 x i32>
+ %tmp83.bc = bitcast <4 x i32> %tmp83 to <4 x i32>
%tmp296.bc = bitcast <8 x i32> %tmp296 to <8 x float>
%tmp297 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp296.bc, <8 x i32> %tmp81, <4 x i32> %tmp83.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp298 = extractelement <4 x float> %tmp297, i32 0
@@ -452,7 +452,7 @@ IF67: ; preds = %LOOP65
%tmp312 = insertelement <8 x i32> %tmp311, i32 %tmp306, i32 5
%tmp313 = insertelement <8 x i32> %tmp312, i32 undef, i32 6
%tmp314 = insertelement <8 x i32> %tmp313, i32 undef, i32 7
- %tmp79.bc = bitcast <16 x i8> %tmp79 to <4 x i32>
+ %tmp79.bc = bitcast <4 x i32> %tmp79 to <4 x i32>
%tmp314.bc = bitcast <8 x i32> %tmp314 to <8 x float>
%tmp315 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp314.bc, <8 x i32> %tmp77, <4 x i32> %tmp79.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp316 = extractelement <4 x float> %tmp315, i32 0
@@ -515,7 +515,7 @@ IF67: ; preds = %LOOP65
%tmp372 = insertelement <8 x i32> %tmp371, i32 %tmp366, i32 5
%tmp373 = insertelement <8 x i32> %tmp372, i32 undef, i32 6
%tmp374 = insertelement <8 x i32> %tmp373, i32 undef, i32 7
- %tmp71.bc = bitcast <16 x i8> %tmp71 to <4 x i32>
+ %tmp71.bc = bitcast <4 x i32> %tmp71 to <4 x i32>
%tmp374.bc = bitcast <8 x i32> %tmp374 to <8 x float>
%tmp375 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp374.bc, <8 x i32> %tmp69, <4 x i32> %tmp71.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp376 = extractelement <4 x float> %tmp375, i32 0
@@ -571,7 +571,7 @@ IF67: ; preds = %LOOP65
%tmp426 = insertelement <8 x i32> %tmp425, i32 %tmp420, i32 5
%tmp427 = insertelement <8 x i32> %tmp426, i32 undef, i32 6
%tmp428 = insertelement <8 x i32> %tmp427, i32 undef, i32 7
- %tmp87.bc = bitcast <16 x i8> %tmp87 to <4 x i32>
+ %tmp87.bc = bitcast <4 x i32> %tmp87 to <4 x i32>
%tmp428.bc = bitcast <8 x i32> %tmp428 to <8 x float>
%tmp429 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp428.bc, <8 x i32> %tmp85, <4 x i32> %tmp87.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp430 = extractelement <4 x float> %tmp429, i32 0
@@ -624,7 +624,7 @@ IF67: ; preds = %LOOP65
%tmp467 = insertelement <4 x i32> %tmp466, i32 %tmp464, i32 1
%tmp468 = insertelement <4 x i32> %tmp467, i32 %tmp465, i32 2
%tmp469 = insertelement <4 x i32> %tmp468, i32 undef, i32 3
- %tmp91.bc = bitcast <16 x i8> %tmp91 to <4 x i32>
+ %tmp91.bc = bitcast <4 x i32> %tmp91 to <4 x i32>
%tmp469.bc = bitcast <4 x i32> %tmp469 to <4 x float>
%tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tmp469.bc, <8 x i32> %tmp89, <4 x i32> %tmp91.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%tmp471 = extractelement <4 x float> %tmp470, i32 0
@@ -727,7 +727,7 @@ IF67: ; preds = %LOOP65
%tmp568 = insertelement <8 x i32> %tmp567, i32 %tmp562, i32 5
%tmp569 = insertelement <8 x i32> %tmp568, i32 undef, i32 6
%tmp570 = insertelement <8 x i32> %tmp569, i32 undef, i32 7
- %tmp75.bc = bitcast <16 x i8> %tmp75 to <4 x i32>
+ %tmp75.bc = bitcast <4 x i32> %tmp75 to <4 x i32>
%tmp570.bc = bitcast <8 x i32> %tmp570 to <8 x float>
%tmp571 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp570.bc, <8 x i32> %tmp73, <4 x i32> %tmp75.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp572 = extractelement <4 x float> %tmp571, i32 0
@@ -778,149 +778,149 @@ ENDIF66: ; preds = %LOOP65
; GCN-LABEL: {{^}}main1:
; GCN: s_endpgm
; TOVGPR: ScratchSize: 0{{$}}
-define amdgpu_ps void @main1([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @main1([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
main_body:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 0)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 4)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 8)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 12)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 28)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 48)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 52)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 56)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 64)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 68)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 72)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 76)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 144)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 148)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 152)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 160)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 164)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 168)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 172)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 176)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 180)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 184)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 192)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 196)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 200)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 208)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 212)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 216)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 220)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 236)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 240)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 244)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 248)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 252)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 256)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 260)
- %tmp60 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 264)
- %tmp61 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 268)
- %tmp62 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 272)
- %tmp63 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 276)
- %tmp64 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 280)
- %tmp65 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 284)
- %tmp66 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 288)
- %tmp67 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 292)
- %tmp68 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 464)
- %tmp69 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 468)
- %tmp70 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 472)
- %tmp71 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 496)
- %tmp72 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 500)
- %tmp73 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 504)
- %tmp74 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 512)
- %tmp75 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 516)
- %tmp76 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 524)
- %tmp77 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 532)
- %tmp78 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 536)
- %tmp79 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 540)
- %tmp80 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 544)
- %tmp81 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 548)
- %tmp82 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 552)
- %tmp83 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 556)
- %tmp84 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 560)
- %tmp85 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 564)
- %tmp86 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 568)
- %tmp87 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 572)
- %tmp88 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 576)
- %tmp89 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 580)
- %tmp90 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 584)
- %tmp91 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 588)
- %tmp92 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 592)
- %tmp93 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 596)
- %tmp94 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 600)
- %tmp95 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 604)
- %tmp96 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 608)
- %tmp97 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 612)
- %tmp98 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 616)
- %tmp99 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 624)
- %tmp100 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 628)
- %tmp101 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 632)
- %tmp102 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 636)
- %tmp103 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 640)
- %tmp104 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 644)
- %tmp105 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 648)
- %tmp106 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 652)
- %tmp107 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 656)
- %tmp108 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 660)
- %tmp109 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 664)
- %tmp110 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 668)
- %tmp111 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 672)
- %tmp112 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 676)
- %tmp113 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 680)
- %tmp114 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 684)
- %tmp115 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 688)
- %tmp116 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 692)
- %tmp117 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 696)
- %tmp118 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 700)
- %tmp119 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 704)
- %tmp120 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 708)
- %tmp121 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 712)
- %tmp122 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 716)
- %tmp123 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 864)
- %tmp124 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 868)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 0)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 4)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 8)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 12)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 28)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 48)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 52)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 56)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 64)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 68)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 72)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 76)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 128)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 132)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 144)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 148)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 152)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 160)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 164)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 168)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 172)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 176)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 180)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 184)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 192)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 196)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 200)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 208)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 212)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 216)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 220)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 236)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 240)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 244)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 248)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 252)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 256)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 260)
+ %tmp60 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 264)
+ %tmp61 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 268)
+ %tmp62 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 272)
+ %tmp63 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 276)
+ %tmp64 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 280)
+ %tmp65 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 284)
+ %tmp66 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 288)
+ %tmp67 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 292)
+ %tmp68 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 464)
+ %tmp69 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 468)
+ %tmp70 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 472)
+ %tmp71 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 496)
+ %tmp72 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 500)
+ %tmp73 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 504)
+ %tmp74 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 512)
+ %tmp75 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 516)
+ %tmp76 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 524)
+ %tmp77 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 532)
+ %tmp78 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 536)
+ %tmp79 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 540)
+ %tmp80 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 544)
+ %tmp81 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 548)
+ %tmp82 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 552)
+ %tmp83 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 556)
+ %tmp84 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 560)
+ %tmp85 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 564)
+ %tmp86 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 568)
+ %tmp87 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 572)
+ %tmp88 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 576)
+ %tmp89 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 580)
+ %tmp90 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 584)
+ %tmp91 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 588)
+ %tmp92 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 592)
+ %tmp93 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 596)
+ %tmp94 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 600)
+ %tmp95 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 604)
+ %tmp96 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 608)
+ %tmp97 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 612)
+ %tmp98 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 616)
+ %tmp99 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 624)
+ %tmp100 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 628)
+ %tmp101 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 632)
+ %tmp102 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 636)
+ %tmp103 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 640)
+ %tmp104 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 644)
+ %tmp105 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 648)
+ %tmp106 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 652)
+ %tmp107 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 656)
+ %tmp108 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 660)
+ %tmp109 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 664)
+ %tmp110 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 668)
+ %tmp111 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 672)
+ %tmp112 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 676)
+ %tmp113 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 680)
+ %tmp114 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 684)
+ %tmp115 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 688)
+ %tmp116 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 692)
+ %tmp117 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 696)
+ %tmp118 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 700)
+ %tmp119 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 704)
+ %tmp120 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 708)
+ %tmp121 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 712)
+ %tmp122 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 716)
+ %tmp123 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 864)
+ %tmp124 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 868)
%tmp125 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp126 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp125, !tbaa !0
- %tmp127 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp128 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp127, !tbaa !0
+ %tmp127 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp128 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp127, !tbaa !0
%tmp129 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 1
%tmp130 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp129, !tbaa !0
- %tmp131 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 1
- %tmp132 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp131, !tbaa !0
+ %tmp131 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 1
+ %tmp132 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp131, !tbaa !0
%tmp133 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 2
%tmp134 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp133, !tbaa !0
- %tmp135 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 2
- %tmp136 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp135, !tbaa !0
+ %tmp135 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 2
+ %tmp136 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp135, !tbaa !0
%tmp137 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 3
%tmp138 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp137, !tbaa !0
- %tmp139 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 3
- %tmp140 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp139, !tbaa !0
+ %tmp139 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 3
+ %tmp140 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp139, !tbaa !0
%tmp141 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 4
%tmp142 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp141, !tbaa !0
- %tmp143 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 4
- %tmp144 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp143, !tbaa !0
+ %tmp143 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 4
+ %tmp144 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp143, !tbaa !0
%tmp145 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 5
%tmp146 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp145, !tbaa !0
- %tmp147 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 5
- %tmp148 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp147, !tbaa !0
+ %tmp147 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 5
+ %tmp148 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp147, !tbaa !0
%tmp149 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 6
%tmp150 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp149, !tbaa !0
- %tmp151 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 6
- %tmp152 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp151, !tbaa !0
+ %tmp151 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 6
+ %tmp152 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp151, !tbaa !0
%tmp153 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 7
%tmp154 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp153, !tbaa !0
- %tmp155 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
- %tmp156 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp155, !tbaa !0
+ %tmp155 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 7
+ %tmp156 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp155, !tbaa !0
%tmp157 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 8
%tmp158 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp157, !tbaa !0
- %tmp159 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 8
- %tmp160 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp159, !tbaa !0
+ %tmp159 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 8
+ %tmp160 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp159, !tbaa !0
%tmp161 = fcmp ugt float %arg17, 0.000000e+00
%tmp162 = select i1 %tmp161, float 1.000000e+00, float 0.000000e+00
%i.i = extractelement <2 x i32> %arg6, i32 0
@@ -1144,7 +1144,7 @@ main_body:
%tmp222 = bitcast float %p2.i126 to i32
%tmp223 = insertelement <2 x i32> undef, i32 %tmp221, i32 0
%tmp224 = insertelement <2 x i32> %tmp223, i32 %tmp222, i32 1
- %tmp132.bc = bitcast <16 x i8> %tmp132 to <4 x i32>
+ %tmp132.bc = bitcast <4 x i32> %tmp132 to <4 x i32>
%tmp224.bc = bitcast <2 x i32> %tmp224 to <2 x float>
%tmp225 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp224.bc, <8 x i32> %tmp130, <4 x i32> %tmp132.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp226 = extractelement <4 x float> %tmp225, i32 0
@@ -1218,7 +1218,7 @@ LOOP: ; preds = %LOOP, %main_body
%tmp279 = insertelement <4 x i32> %tmp278, i32 %tmp277, i32 1
%tmp280 = insertelement <4 x i32> %tmp279, i32 0, i32 2
%tmp281 = insertelement <4 x i32> %tmp280, i32 undef, i32 3
- %tmp148.bc = bitcast <16 x i8> %tmp148 to <4 x i32>
+ %tmp148.bc = bitcast <4 x i32> %tmp148 to <4 x i32>
%tmp281.bc = bitcast <4 x i32> %tmp281 to <4 x float>
%tmp282 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp281.bc, <8 x i32> %tmp146, <4 x i32> %tmp148.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp283 = extractelement <4 x float> %tmp282, i32 3
@@ -1283,7 +1283,7 @@ IF189: ; preds = %LOOP
%tmp339 = bitcast float %tmp335 to i32
%tmp340 = insertelement <2 x i32> undef, i32 %tmp338, i32 0
%tmp341 = insertelement <2 x i32> %tmp340, i32 %tmp339, i32 1
- %tmp136.bc = bitcast <16 x i8> %tmp136 to <4 x i32>
+ %tmp136.bc = bitcast <4 x i32> %tmp136 to <4 x i32>
%a.bc.i = bitcast <2 x i32> %tmp341 to <2 x float>
%tmp0 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp134, <4 x i32> %tmp136.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp343 = extractelement <4 x float> %tmp0, i32 0
@@ -1317,7 +1317,7 @@ IF189: ; preds = %LOOP
%tmp359 = bitcast float %tmp337 to i32
%tmp360 = insertelement <2 x i32> undef, i32 %tmp358, i32 0
%tmp361 = insertelement <2 x i32> %tmp360, i32 %tmp359, i32 1
- %tmp152.bc = bitcast <16 x i8> %tmp152 to <4 x i32>
+ %tmp152.bc = bitcast <4 x i32> %tmp152 to <4 x i32>
%a.bc.i3 = bitcast <2 x i32> %tmp361 to <2 x float>
%tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i3, <8 x i32> %tmp150, <4 x i32> %tmp152.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp363 = extractelement <4 x float> %tmp1, i32 2
@@ -1329,7 +1329,7 @@ IF189: ; preds = %LOOP
%tmp369 = bitcast float %tmp311 to i32
%tmp370 = insertelement <2 x i32> undef, i32 %tmp368, i32 0
%tmp371 = insertelement <2 x i32> %tmp370, i32 %tmp369, i32 1
- %tmp140.bc = bitcast <16 x i8> %tmp140 to <4 x i32>
+ %tmp140.bc = bitcast <4 x i32> %tmp140 to <4 x i32>
%a.bc.i2 = bitcast <2 x i32> %tmp371 to <2 x float>
%tmp2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i2, <8 x i32> %tmp138, <4 x i32> %tmp140.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp373 = extractelement <4 x float> %tmp2, i32 0
@@ -1347,7 +1347,7 @@ IF189: ; preds = %LOOP
%tmp383 = bitcast float %tmp321 to i32
%tmp384 = insertelement <2 x i32> undef, i32 %tmp382, i32 0
%tmp385 = insertelement <2 x i32> %tmp384, i32 %tmp383, i32 1
- %tmp144.bc = bitcast <16 x i8> %tmp144 to <4 x i32>
+ %tmp144.bc = bitcast <4 x i32> %tmp144 to <4 x i32>
%a.bc.i1 = bitcast <2 x i32> %tmp385 to <2 x float>
%tmp3 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i1, <8 x i32> %tmp142, <4 x i32> %tmp144.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp387 = extractelement <4 x float> %tmp3, i32 0
@@ -1446,7 +1446,7 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp467 = bitcast float %tmp220 to i32
%tmp468 = insertelement <2 x i32> undef, i32 %tmp466, i32 0
%tmp469 = insertelement <2 x i32> %tmp468, i32 %tmp467, i32 1
- %tmp160.bc = bitcast <16 x i8> %tmp160 to <4 x i32>
+ %tmp160.bc = bitcast <4 x i32> %tmp160 to <4 x i32>
%tmp469.bc = bitcast <2 x i32> %tmp469 to <2 x float>
%tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp469.bc, <8 x i32> %tmp158, <4 x i32> %tmp160.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp471 = extractelement <4 x float> %tmp470, i32 0
@@ -1465,7 +1465,7 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp484 = bitcast float %p2.i138 to i32
%tmp485 = insertelement <2 x i32> undef, i32 %tmp483, i32 0
%tmp486 = insertelement <2 x i32> %tmp485, i32 %tmp484, i32 1
- %tmp156.bc = bitcast <16 x i8> %tmp156 to <4 x i32>
+ %tmp156.bc = bitcast <4 x i32> %tmp156 to <4 x i32>
%tmp486.bc = bitcast <2 x i32> %tmp486 to <2 x float>
%tmp487 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp486.bc, <8 x i32> %tmp154, <4 x i32> %tmp156.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp488 = extractelement <4 x float> %tmp487, i32 0
@@ -1674,7 +1674,7 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp657 = insertelement <4 x i32> %tmp656, i32 %tmp654, i32 1
%tmp658 = insertelement <4 x i32> %tmp657, i32 %tmp655, i32 2
%tmp659 = insertelement <4 x i32> %tmp658, i32 undef, i32 3
- %tmp128.bc = bitcast <16 x i8> %tmp128 to <4 x i32>
+ %tmp128.bc = bitcast <4 x i32> %tmp128 to <4 x i32>
%tmp659.bc = bitcast <4 x i32> %tmp659 to <4 x float>
%tmp660 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp659.bc, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp661 = extractelement <4 x float> %tmp660, i32 0
@@ -1869,7 +1869,7 @@ declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
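+; Note: the resource operand is now <4 x i32>, which is why every call site
+; above uses the type-suffixed name llvm.SI.load.const.v4i32.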
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/si-spill-cf.ll b/test/CodeGen/AMDGPU/si-spill-cf.ll
index 926702645d9e4..2a8ced59ddef6 100644
--- a/test/CodeGen/AMDGPU/si-spill-cf.ll
+++ b/test/CodeGen/AMDGPU/si-spill-cf.ll
@@ -9,73 +9,73 @@
define amdgpu_ps void @main() #0 {
main_body:
- %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 16)
- %tmp1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 32)
- %tmp2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 80)
- %tmp3 = call float @llvm.SI.load.const(<16 x i8> undef, i32 84)
- %tmp4 = call float @llvm.SI.load.const(<16 x i8> undef, i32 88)
- %tmp5 = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
- %tmp6 = call float @llvm.SI.load.const(<16 x i8> undef, i32 100)
- %tmp7 = call float @llvm.SI.load.const(<16 x i8> undef, i32 104)
- %tmp8 = call float @llvm.SI.load.const(<16 x i8> undef, i32 112)
- %tmp9 = call float @llvm.SI.load.const(<16 x i8> undef, i32 116)
- %tmp10 = call float @llvm.SI.load.const(<16 x i8> undef, i32 120)
- %tmp11 = call float @llvm.SI.load.const(<16 x i8> undef, i32 128)
- %tmp12 = call float @llvm.SI.load.const(<16 x i8> undef, i32 132)
- %tmp13 = call float @llvm.SI.load.const(<16 x i8> undef, i32 136)
- %tmp14 = call float @llvm.SI.load.const(<16 x i8> undef, i32 144)
- %tmp15 = call float @llvm.SI.load.const(<16 x i8> undef, i32 148)
- %tmp16 = call float @llvm.SI.load.const(<16 x i8> undef, i32 152)
- %tmp17 = call float @llvm.SI.load.const(<16 x i8> undef, i32 160)
- %tmp18 = call float @llvm.SI.load.const(<16 x i8> undef, i32 164)
- %tmp19 = call float @llvm.SI.load.const(<16 x i8> undef, i32 168)
- %tmp20 = call float @llvm.SI.load.const(<16 x i8> undef, i32 176)
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> undef, i32 180)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> undef, i32 184)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> undef, i32 192)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> undef, i32 196)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> undef, i32 200)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> undef, i32 208)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> undef, i32 212)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> undef, i32 216)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> undef, i32 224)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> undef, i32 228)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> undef, i32 232)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> undef, i32 240)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> undef, i32 244)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> undef, i32 248)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> undef, i32 256)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> undef, i32 260)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> undef, i32 264)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> undef, i32 272)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> undef, i32 276)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> undef, i32 280)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> undef, i32 288)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> undef, i32 292)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> undef, i32 296)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> undef, i32 304)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> undef, i32 308)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> undef, i32 312)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> undef, i32 320)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> undef, i32 324)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> undef, i32 328)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> undef, i32 336)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> undef, i32 340)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> undef, i32 344)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> undef, i32 352)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> undef, i32 356)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> undef, i32 360)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> undef, i32 368)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> undef, i32 372)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> undef, i32 376)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> undef, i32 384)
- %tmp60 = call float @llvm.SI.load.const(<16 x i8> undef, i32 388)
- %tmp61 = call float @llvm.SI.load.const(<16 x i8> undef, i32 392)
- %tmp62 = call float @llvm.SI.load.const(<16 x i8> undef, i32 400)
- %tmp63 = call float @llvm.SI.load.const(<16 x i8> undef, i32 404)
- %tmp64 = call float @llvm.SI.load.const(<16 x i8> undef, i32 408)
- %tmp65 = call float @llvm.SI.load.const(<16 x i8> undef, i32 416)
- %tmp66 = call float @llvm.SI.load.const(<16 x i8> undef, i32 420)
+ %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 16)
+ %tmp1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 32)
+ %tmp2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 80)
+ %tmp3 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 84)
+ %tmp4 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 88)
+ %tmp5 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 96)
+ %tmp6 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 100)
+ %tmp7 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 104)
+ %tmp8 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 112)
+ %tmp9 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 116)
+ %tmp10 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 120)
+ %tmp11 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 128)
+ %tmp12 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 132)
+ %tmp13 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 136)
+ %tmp14 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 144)
+ %tmp15 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 148)
+ %tmp16 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 152)
+ %tmp17 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 160)
+ %tmp18 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 164)
+ %tmp19 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 168)
+ %tmp20 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 176)
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 180)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 184)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 192)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 196)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 200)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 208)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 212)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 216)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 224)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 228)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 232)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 240)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 244)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 248)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 256)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 260)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 264)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 272)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 276)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 280)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 288)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 292)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 296)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 304)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 308)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 312)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 320)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 324)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 328)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 336)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 340)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 344)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 352)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 356)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 360)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 368)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 372)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 376)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 384)
+ %tmp60 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 388)
+ %tmp61 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 392)
+ %tmp62 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 400)
+ %tmp63 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 404)
+ %tmp64 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 408)
+ %tmp65 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 416)
+ %tmp66 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 420)
br label %LOOP
LOOP: ; preds = %ENDIF2795, %main_body
@@ -497,7 +497,7 @@ declare float @llvm.minnum.f32(float, float) #1
declare float @llvm.maxnum.f32(float, float) #1
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/smrd.ll b/test/CodeGen/AMDGPU/smrd.ll
index 50f72c6705982..3f1e1cacb879d 100644
--- a/test/CodeGen/AMDGPU/smrd.ll
+++ b/test/CodeGen/AMDGPU/smrd.ll
@@ -84,34 +84,34 @@ entry:
ret void
}
-; SMRD load using the load.const intrinsic with an immediate offset
+; SMRD load using the load.const.v4i32 intrinsic with an immediate offset
; GCN-LABEL: {{^}}smrd_load_const0:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
-define amdgpu_ps void @smrd_load_const0(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const0(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
-; SMRD load using the load.const intrinsic with the largest possible immediate
+; SMRD load using the load.const.v4i32 intrinsic with the largest possible immediate
; offset.
; GCN-LABEL: {{^}}smrd_load_const1:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
-define amdgpu_ps void @smrd_load_const1(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const1(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1020)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1020)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
-; SMRD load using the load.const intrinsic with an offset greater than the
+; SMRD load using the load.const.v4i32 intrinsic with an offset greater than the
; largest possible immediate offset.
; GCN-LABEL: {{^}}smrd_load_const2:
@@ -119,11 +119,11 @@ main_body:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
-define amdgpu_ps void @smrd_load_const2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const2(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1024)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1024)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -134,11 +134,11 @@ main_body:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
-define amdgpu_ps void @smrd_load_const3(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const3(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1048572)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048572)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -149,17 +149,17 @@ main_body:
; SIVI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
-define amdgpu_ps void @smrd_load_const4(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const4(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1048576)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048576)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/spill-to-smem-m0.ll b/test/CodeGen/AMDGPU/spill-to-smem-m0.ll
new file mode 100644
index 0000000000000..c6691e7bb2f84
--- /dev/null
+++ b/test/CodeGen/AMDGPU/spill-to-smem-m0.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -march=amdgcn -mcpu=fiji -amdgpu-spill-sgpr-to-smem=1 -verify-machineinstrs -stop-before=prologepilog < %s
+
+; Spill to SMEM clobbers M0. Check that the implicit-def dead operand is present
+; in the pseudo instructions.
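+; For reference, the save pseudo being matched has roughly this shape (the
+; register and stack-slot operands here are illustrative, not exact):
+;   SI_SPILL_S32_SAVE killed %sgpr4, %stack.0, implicit %exec, implicit-def dead %m0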
+
+; CHECK-LABEL: {{^}}spill_sgpr:
+; CHECK: SI_SPILL_S32_SAVE {{.*}}, implicit-def dead %m0
+; CHECK: SI_SPILL_S32_RESTORE {{.*}}, implicit-def dead %m0
+define amdgpu_kernel void @spill_sgpr(i32 addrspace(1)* %out, i32 %in) #0 {
+ %sgpr = call i32 asm sideeffect "; def $0", "=s" () #0
+ %cmp = icmp eq i32 %in, 0
+ br i1 %cmp, label %bb0, label %ret
+
+bb0:
+ call void asm sideeffect "; use $0", "s"(i32 %sgpr) #0
+ br label %ret
+
+ret:
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/split-smrd.ll b/test/CodeGen/AMDGPU/split-smrd.ll
index cdb1b1e3b5032..5fc69067760a0 100644
--- a/test/CodeGen/AMDGPU/split-smrd.ll
+++ b/test/CodeGen/AMDGPU/split-smrd.ll
@@ -8,7 +8,7 @@
; GCN: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @split_smrd_add_worklist([34 x <8 x i32>] addrspace(2)* byval %arg) #0 {
bb:
- %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
+ %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 96)
%tmp1 = bitcast float %tmp to i32
br i1 undef, label %bb2, label %bb3
@@ -31,7 +31,7 @@ bb3: ; preds = %bb
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index c9c8583d5e879..ca2366a361fbf 100644
--- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -27,17 +27,17 @@
; GCN: NumVgprs: 256
; GCN: ScratchSize: 1536
-define amdgpu_vs void @main([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
+define amdgpu_vs void @main([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <4 x i32>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
bb:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i64 0, i64 0
- %tmp11 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0
- %tmp12 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 0)
- %tmp13 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 16)
- %tmp14 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 32)
- %tmp15 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0
- %tmp16 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp15, align 16, !tbaa !0
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg1, i64 0, i64 0
+ %tmp11 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, align 16, !tbaa !0
+ %tmp12 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp11, i32 0)
+ %tmp13 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp11, i32 16)
+ %tmp14 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp11, i32 32)
+ %tmp15 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg4, i64 0, i64 0
+ %tmp16 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp15, align 16, !tbaa !0
%tmp17 = add i32 %arg5, %arg7
- %tmp16.cast = bitcast <16 x i8> %tmp16 to <4 x i32>
+ %tmp16.cast = bitcast <4 x i32> %tmp16 to <4 x i32>
%tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp16.cast, i32 %tmp17, i32 0, i1 false, i1 false)
%tmp19 = extractelement <4 x float> %tmp18, i32 0
%tmp20 = extractelement <4 x float> %tmp18, i32 1
@@ -488,7 +488,7 @@ bb157: ; preds = %bb24
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
attributes #0 = { nounwind }
diff --git a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
index ff3b7e16188e6..fefe16747f106 100644
--- a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
+++ b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
@@ -24,7 +24,7 @@ entry:
; CHECK-LABEL: caller:
define void @caller() {
-; CHECK: ldm r0, {r1, r2, r3}
+; CHECK: ldm r{{[0-9]+}}, {r1, r2, r3}
call void @t(i32 0, %struct.s* @v);
ret void
}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 16642d85d9cfd..6a1da0dfe85f3 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -42,6 +42,9 @@
define void @test_constant_imm() { ret void }
define void @test_constant_cimm() { ret void }
+ define void @test_select_s32() { ret void }
+ define void @test_select_ptr() { ret void }
+
define void @test_soft_fp_double() #0 { ret void }
attributes #0 = { "target-features"="+vfp2,-neonfp" }
@@ -1100,6 +1103,76 @@ body: |
BX_RET 14, _, implicit %r0
...
---
+name: test_select_s32
+# CHECK-LABEL: name: test_select_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(s32) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = COPY %r1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
+
+ %2(s1) = COPY %r2
+ ; CHECK: [[VREGC:%[0-9]+]] = COPY %r2
+
+ %3(s32) = G_SELECT %2(s1), %0, %1
+ ; CHECK: CMPri [[VREGC]], 0, 14, _, implicit-def %cpsr
+ ; CHECK: [[RES:%[0-9]+]] = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RES]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_select_ptr
+# CHECK-LABEL: name: test_select_ptr
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(p0) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(p0) = COPY %r1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
+
+ %2(s1) = COPY %r2
+ ; CHECK: [[VREGC:%[0-9]+]] = COPY %r2
+
+ %3(p0) = G_SELECT %2(s1), %0, %1
+ ; CHECK: CMPri [[VREGC]], 0, 14, _, implicit-def %cpsr
+ ; CHECK: [[RES:%[0-9]+]] = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
+
+ %r0 = COPY %3(p0)
+ ; CHECK: %r0 = COPY [[RES]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
name: test_soft_fp_double
# CHECK-LABEL: name: test_soft_fp_double
legalized: true
diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index 0ff8d52e94c62..f50916e4b4741 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -910,7 +910,7 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) {
define i32 @test_shufflevector_s32_v2s32(i32 %arg) {
; CHECK-LABEL: name: test_shufflevector_s32_v2s32
; CHECK: [[ARG:%[0-9]+]](s32) = COPY %r0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
@@ -925,7 +925,7 @@ define i32 @test_shufflevector_v2s32_v3s32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[MASK:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
@@ -945,7 +945,7 @@ define i32 @test_shufflevector_v2s32_v4s32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[MASK:%[0-9]+]](<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32)
@@ -966,7 +966,7 @@ define i32 @test_shufflevector_v4s32_v2s32(i32 %arg1, i32 %arg2, i32 %arg3, i32
; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
; CHECK: [[ARG3:%[0-9]+]](s32) = COPY %r2
; CHECK: [[ARG4:%[0-9]+]](s32) = COPY %r3
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
@@ -1009,7 +1009,7 @@ define i32 @test_constantstruct_v2s32_s32_s32() {
; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
; CHECK: [[C4:%[0-9]+]](s32) = G_CONSTANT i32 4
-; CHECK: [[C5:%[0-9]+]](s128) = IMPLICIT_DEF
+; CHECK: [[C5:%[0-9]+]](s128) = G_IMPLICIT_DEF
; CHECK: [[C6:%[0-9]+]](s128) = G_INSERT [[C5]], [[VEC]](<2 x s32>), 0
; CHECK: [[C7:%[0-9]+]](s128) = G_INSERT [[C6]], [[C3]](s32), 64
; CHECK: [[C8:%[0-9]+]](s128) = G_INSERT [[C7]], [[C4]](s32), 96
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel.ll b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
index 76fb39ecea013..4c498ff6ca9bf 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-isel.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
@@ -400,3 +400,23 @@ entry:
%r = zext i1 %v to i32
ret i32 %r
}
+
+define arm_aapcscc i32 @test_select_i32(i32 %a, i32 %b, i1 %cond) {
+; CHECK-LABEL: test_select_i32
+; CHECK: cmp r2, #0
+; CHECK: moveq r0, r1
+; CHECK: bx lr
+entry:
+ %r = select i1 %cond, i32 %a, i32 %b
+ ret i32 %r
+}
+
+define arm_aapcscc i32* @test_select_ptr(i32* %a, i32* %b, i1 %cond) {
+; CHECK-LABEL: test_select_ptr
+; CHECK: cmp r2, #0
+; CHECK: moveq r0, r1
+; CHECK: bx lr
+entry:
+ %r = select i1 %cond, i32* %a, i32* %b
+ ret i32* %r
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 2def31eb15929..bf759728c3658 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -39,6 +39,9 @@
define void @test_icmp_s16() { ret void }
define void @test_icmp_s32() { ret void }
+ define void @test_select_s32() { ret void }
+ define void @test_select_ptr() { ret void }
+
define void @test_fadd_s32() #0 { ret void }
define void @test_fadd_s64() #0 { ret void }
@@ -775,6 +778,58 @@ body: |
BX_RET 14, _, implicit %r0
...
---
+name: test_select_s32
+# CHECK-LABEL: name: test_select_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s1) = COPY %r2
+ %3(s32) = G_SELECT %2(s1), %0, %1
+ ; G_SELECT with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(s32) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
+ %r0 = COPY %3(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_select_ptr
+# CHECK-LABEL: name: test_select_ptr
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(p0) = COPY %r0
+ %1(p0) = COPY %r1
+ %2(s1) = COPY %r2
+ %3(p0) = G_SELECT %2(s1), %0, %1
+ ; G_SELECT with p0 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(p0) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
+ %r0 = COPY %3(p0)
+ BX_RET 14, _, implicit %r0
+...
+---
name: test_fadd_s32
# CHECK-LABEL: name: test_fadd_s32
legalized: false
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index d97dd60bac223..d3b93e488ef47 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -36,6 +36,8 @@
define void @test_icmp_eq_s32() { ret void }
+ define void @test_select_s32() { ret void }
+
define void @test_fadd_s32() #0 { ret void }
define void @test_fadd_s64() #0 { ret void }
@@ -741,6 +743,35 @@ body: |
...
---
+name: test_select_s32
+# CHECK-LABEL: name: test_select_s32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb, preferred-register: '' }
+# CHECK: - { id: 1, class: gprb, preferred-register: '' }
+# CHECK: - { id: 2, class: gprb, preferred-register: '' }
+# CHECK: - { id: 3, class: gprb, preferred-register: '' }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s1) = COPY %r2
+ %3(s32) = G_SELECT %2(s1), %0, %1
+ %r0 = COPY %3(s32)
+ BX_RET 14, _, implicit %r0
+
+...
+---
name: test_fadd_s32
# CHECK-LABEL: name: test_fadd_s32
legalized: true
diff --git a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
index d303e9da8604e..a73a7cf8414fc 100644
--- a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
+++ b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
@@ -19,9 +19,9 @@ entry:
; CHECK-LABEL: isel
; CHECK: push {r4, r5}
-; CHECK: movw r4, #{{\d*}}
; CHECK: movw r12, #0
; CHECK: movt r12, #0
+; CHECK: movw r4, #{{\d*}}
; CHECK: blx r12
; CHECK: sub.w sp, sp, r4
diff --git a/test/CodeGen/ARM/Windows/no-arm-mode.ll b/test/CodeGen/ARM/Windows/no-arm-mode.ll
deleted file mode 100644
index 30353640a4cc3..0000000000000
--- a/test/CodeGen/ARM/Windows/no-arm-mode.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llc -mtriple=armv7-windows-itanium -mcpu=cortex-a9 -o /dev/null %s 2>&1 \
-; RUN: | FileCheck %s -check-prefix CHECK-WIN
-
-; RUN: not llc -mtriple=armv7-windows-gnu -mcpu=cortex-a9 -o /dev/null %s 2>&1 \
-; RUN: | FileCheck %s -check-prefix CHECK-GNU
-
-; CHECK-WIN: does not support ARM mode execution
-
-; CHECK-GNU: does not support ARM mode execution
-
diff --git a/test/CodeGen/ARM/Windows/tls.ll b/test/CodeGen/ARM/Windows/tls.ll
index 947e29dfa65c8..2c38ad3e58f76 100644
--- a/test/CodeGen/ARM/Windows/tls.ll
+++ b/test/CodeGen/ARM/Windows/tls.ll
@@ -15,11 +15,11 @@ define i32 @f() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -36,11 +36,11 @@ define i32 @e() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -57,11 +57,11 @@ define i32 @d() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -78,11 +78,11 @@ define i32 @c() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -99,11 +99,11 @@ define i32 @b() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -120,11 +120,11 @@ define i16 @a() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -141,11 +141,11 @@ define i8 @Z() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
diff --git a/test/CodeGen/ARM/alloca.ll b/test/CodeGen/ARM/alloca.ll
index 4a0835a2c0caf..82b6b11ea4b2b 100644
--- a/test/CodeGen/ARM/alloca.ll
+++ b/test/CodeGen/ARM/alloca.ll
@@ -2,11 +2,11 @@
define void @f(i32 %a) {
entry:
-; CHECK: add r11, sp, #4
+; CHECK: add r11, sp, #8
%tmp = alloca i8, i32 %a ; <i8*> [#uses=1]
call void @g( i8* %tmp, i32 %a, i32 1, i32 2, i32 3 )
ret void
-; CHECK: sub sp, r11, #4
+; CHECK: sub sp, r11, #8
}
declare void @g(i8*, i32, i32, i32, i32)
diff --git a/test/CodeGen/ARM/arg-copy-elide.ll b/test/CodeGen/ARM/arg-copy-elide.ll
index 739b560b0833f..625b570734068 100644
--- a/test/CodeGen/ARM/arg-copy-elide.ll
+++ b/test/CodeGen/ARM/arg-copy-elide.ll
@@ -31,8 +31,8 @@ entry:
; CHECK-LABEL: use_arg:
; CHECK: push {[[csr:[^ ]*]], lr}
-; CHECK: ldr [[csr]], [sp, #8]
; CHECK: add r0, sp, #8
+; CHECK: ldr [[csr]], [sp, #8]
; CHECK: bl addrof_i32
; CHECK: mov r0, [[csr]]
; CHECK: pop {[[csr]], pc}
@@ -50,8 +50,8 @@ entry:
; CHECK: push {r4, r5, r11, lr}
; CHECK: sub sp, sp, #8
; CHECK: ldr r4, [sp, #28]
-; CHECK: ldr r5, [sp, #24]
; CHECK: mov r0, sp
+; CHECK: ldr r5, [sp, #24]
; CHECK: str r4, [sp, #4]
; CHECK: str r5, [sp]
; CHECK: bl addrof_i64
diff --git a/test/CodeGen/ARM/arm-abi-attr.ll b/test/CodeGen/ARM/arm-abi-attr.ll
index 61cb6cefa170a..f05e6e788d6fc 100644
--- a/test/CodeGen/ARM/arm-abi-attr.ll
+++ b/test/CodeGen/ARM/arm-abi-attr.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-linux-gnu < %s | FileCheck %s --check-prefix=APCS
+; RUN: llc -mtriple=arm-linux-gnu < %s | FileCheck %s --check-prefix=AAPCS
; RUN: llc -mtriple=arm-linux-gnu -target-abi=apcs < %s | \
; RUN: FileCheck %s --check-prefix=APCS
; RUN: llc -mtriple=arm-linux-gnueabi -target-abi=apcs < %s | \
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 31691e9468c9e..af05392c98a53 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -171,8 +171,8 @@ define i32 @test_tst_assessment(i32 %a, i32 %b) {
;
; V8-LABEL: test_tst_assessment:
; V8: @ BB#0:
-; V8-NEXT: lsls r1, r1, #31
; V8-NEXT: and r0, r0, #1
+; V8-NEXT: lsls r1, r1, #31
; V8-NEXT: it ne
; V8-NEXT: subne r0, #1
; V8-NEXT: bx lr
diff --git a/test/CodeGen/ARM/arm-position-independence-jump-table.ll b/test/CodeGen/ARM/arm-position-independence-jump-table.ll
index 790b4f41776ef..afc2d38be18c0 100644
--- a/test/CodeGen/ARM/arm-position-independence-jump-table.ll
+++ b/test/CodeGen/ARM/arm-position-independence-jump-table.ll
@@ -47,8 +47,8 @@ lab4:
; CHECK-LABEL: jump_table:
-; ARM: lsl r[[R_TAB_IDX:[0-9]+]], r{{[0-9]+}}, #2
; ARM: adr r[[R_TAB_BASE:[0-9]+]], [[LJTI:\.LJTI[0-9]+_[0-9]+]]
+; ARM: lsl r[[R_TAB_IDX:[0-9]+]], r{{[0-9]+}}, #2
; ARM_ABS: ldr pc, [r[[R_TAB_IDX]], r[[R_TAB_BASE]]]
; ARM_PC: ldr r[[R_OFFSET:[0-9]+]], [r[[R_TAB_IDX]], r[[R_TAB_BASE]]]
; ARM_PC: add pc, r[[R_OFFSET]], r[[R_TAB_BASE]]
diff --git a/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll b/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
index 1434f40137b51..7007018dd0b29 100644
--- a/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
+++ b/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
@@ -45,15 +45,19 @@ target triple = "armv7--linux-gnueabi"
; CHECK: @ %while.cond2
; CHECK: add
; CHECK-NEXT: cmp r{{[0-1]+}}, #1
-; Set the return value.
-; CHECK-NEXT: moveq r0,
-; CHECK-NEXT: popeq
+; Jump to the return block
+; CHECK-NEXT: beq [[RETURN_BLOCK:[.a-zA-Z0-9_]+]]
;
; Use the back edge to check we get the label of the loop right.
; This is to make sure we check the right loop pattern.
; CHECK: @ %while.body24.land.rhs14_crit_edge
; CHECK: cmp r{{[0-9]+}}, #192
; CHECK-NEXT: bhs [[LOOP_HEADER]]
+;
+; CHECK: [[RETURN_BLOCK]]:
+; Set the return value.
+; CHECK-NEXT: mov r0,
+; CHECK-NEXT: pop
define fastcc i8* @wrongUseOfPostDominate(i8* readonly %s, i32 %off, i8* readnone %lim) {
entry:
%cmp = icmp sgt i32 %off, -1
diff --git a/test/CodeGen/ARM/atomic-cmpxchg.ll b/test/CodeGen/ARM/atomic-cmpxchg.ll
index e026bae361e19..a136e44fc1960 100644
--- a/test/CodeGen/ARM/atomic-cmpxchg.ll
+++ b/test/CodeGen/ARM/atomic-cmpxchg.ll
@@ -70,8 +70,8 @@ entry:
; CHECK-ARMV7-NEXT: ldrexb [[SUCCESS]], [r0]
; CHECK-ARMV7-NEXT: cmp [[SUCCESS]], r1
; CHECK-ARMV7-NEXT: beq [[HEAD]]
-; CHECK-ARMV7-NEXT: clrex
; CHECK-ARMV7-NEXT: mov r0, #0
+; CHECK-ARMV7-NEXT: clrex
; CHECK-ARMV7-NEXT: bx lr
; CHECK-THUMBV7-LABEL: test_cmpxchg_res_i8:
@@ -88,6 +88,6 @@ entry:
; CHECK-THUMBV7-NEXT: ldrexb [[LD:r[0-9]+]], [r0]
; CHECK-THUMBV7-NEXT: cmp [[LD]], [[DESIRED]]
; CHECK-THUMBV7-NEXT: beq [[TRYST:.LBB[0-9_]+]]
-; CHECK-THUMBV7-NEXT: clrex
; CHECK-THUMBV7-NEXT: movs r0, #0
+; CHECK-THUMBV7-NEXT: clrex
; CHECK-THUMBV7-NEXT: bx lr
diff --git a/test/CodeGen/ARM/bool-ext-inc.ll b/test/CodeGen/ARM/bool-ext-inc.ll
index 5f2ba8b109a76..ca9c9ab079db0 100644
--- a/test/CodeGen/ARM/bool-ext-inc.ll
+++ b/test/CodeGen/ARM/bool-ext-inc.ll
@@ -16,8 +16,8 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK: @ BB#0:
; CHECK-NEXT: vmov.i16 d16, #0x1
; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vmov.i32 q9, #0x1
; CHECK-NEXT: veor d16, d17, d16
+; CHECK-NEXT: vmov.i32 q9, #0x1
; CHECK-NEXT: vmovl.u16 q8, d16
; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmov r0, r1, d16
@@ -31,13 +31,13 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK: @ BB#0:
-; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: vmov.i32 q10, #0x1
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vcge.s32 q8, q8, q9
-; CHECK-NEXT: vand q8, q8, q10
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: mov r0, sp
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vcge.s32 q8, q9, q8
+; CHECK-NEXT: vmov.i32 q9, #0x1
+; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
@@ -50,13 +50,13 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: vmov.i32 q10, #0x1
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vceq.i32 q8, q9, q8
-; CHECK-NEXT: vand q8, q8, q10
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vceq.i32 q8, q8, q9
+; CHECK-NEXT: vmov.i32 q9, #0x1
+; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
diff --git a/test/CodeGen/ARM/cmpxchg-O0-be.ll b/test/CodeGen/ARM/cmpxchg-O0-be.ll
new file mode 100644
index 0000000000000..9e9a93e19b6a4
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg-O0-be.ll
@@ -0,0 +1,26 @@
+; RUN: llc -verify-machineinstrs -mtriple=armebv8-linux-gnueabi -O0 %s -o - | FileCheck %s
+
+@x = global i64 10, align 8
+@y = global i64 20, align 8
+@z = global i64 20, align 8
+
+; CHECK-LABEL: main:
+; CHECK: ldr [[R2:r[0-9]+]], {{\[}}[[R1:r[0-9]+]]{{\]}}
+; CHECK-NEXT: ldr [[R1]], {{\[}}[[R1]], #4]
+; CHECK: mov [[R4:r[0-9]+]], [[R2]]
+; CHECK-NEXT: mov [[R5:r[0-9]+]], [[R1]]
+; CHECK: ldr [[R2]], {{\[}}[[R1]]{{\]}}
+; CHECK-NEXT: ldr [[R1]], {{\[}}[[R1]], #4]
+; CHECK: mov [[R6:r[0-9]+]], [[R2]]
+; CHECK-NEXT: mov [[R7:r[0-9]+]], [[R1]]
+
+define arm_aapcs_vfpcc i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval, align 4
+ %0 = load i64, i64* @z, align 8
+ %1 = load i64, i64* @x, align 8
+ %2 = cmpxchg i64* @y, i64 %0, i64 %1 seq_cst seq_cst
+ %3 = extractvalue { i64, i1 } %2, 1
+ ret i32 0
+}
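
For context on the new test above: a cmpxchg on i64 yields a { i64, i1 } pair, and on a big-endian ARM target the i64 halves occupy a register pair with the most significant word in the lower-numbered register, which is the ordering the ldr/mov checks pin down. A minimal sketch of consuming that pair (the names are assumed, not part of the test):

  %pair = cmpxchg i64* @y, i64 %expected, i64 %new seq_cst seq_cst
  %loaded = extractvalue { i64, i1 } %pair, 0   ; value observed at @y
  %success = extractvalue { i64, i1 } %pair, 1  ; i1: did the exchange happen?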
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
index 0d5681aafbcb0..29d97fef06068 100644
--- a/test/CodeGen/ARM/cmpxchg-weak.ll
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -47,12 +47,12 @@ define i1 @test_cmpxchg_weak_to_bool(i32, i32 *%addr, i32 %desired, i32 %new) {
; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r3, [r1]
; CHECK-NEXT: cmp [[SUCCESS]], #0
; CHECK-NEXT: bxne lr
-; CHECK-NEXT: dmb ish
; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
; CHECK-NEXT: [[LDFAILBB]]:
-; CHECK-NEXT: clrex
; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: clrex
; CHECK-NEXT: bx lr
ret i1 %success
diff --git a/test/CodeGen/ARM/code-placement.ll b/test/CodeGen/ARM/code-placement.ll
index b9d90249e9f6a..b381aecc69a61 100644
--- a/test/CodeGen/ARM/code-placement.ll
+++ b/test/CodeGen/ARM/code-placement.ll
@@ -38,9 +38,8 @@ entry:
br i1 %0, label %bb5, label %bb.nph15
bb1: ; preds = %bb2.preheader, %bb1
-; CHECK: LBB1_[[BB3:.]]: @ %bb3
; CHECK: LBB1_[[PREHDR:.]]: @ %bb2.preheader
-; CHECK: blt LBB1_[[BB3]]
+; CHECK: blt LBB1_[[BB3:.]]
%indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %bb2.preheader ] ; <i32> [#uses=2]
%sum.08 = phi i32 [ %2, %bb1 ], [ %sum.110, %bb2.preheader ] ; <i32> [#uses=1]
%tmp17 = sub i32 %i.07, %indvar ; <i32> [#uses=1]
@@ -54,7 +53,7 @@ bb1: ; preds = %bb2.preheader, %bb1
bb3: ; preds = %bb1, %bb2.preheader
; CHECK: LBB1_[[BB1:.]]: @ %bb1
; CHECK: bne LBB1_[[BB1]]
-; CHECK: b LBB1_[[BB3]]
+; CHECK: LBB1_[[BB3]]: @ %bb3
%sum.0.lcssa = phi i32 [ %sum.110, %bb2.preheader ], [ %2, %bb1 ] ; <i32> [#uses=2]
%3 = add i32 %pass.011, 1 ; <i32> [#uses=2]
%exitcond18 = icmp eq i32 %3, %passes ; <i1> [#uses=1]
diff --git a/test/CodeGen/ARM/constantfp.ll b/test/CodeGen/ARM/constantfp.ll
index 0b431f47f50bf..f825061d11693 100644
--- a/test/CodeGen/ARM/constantfp.ll
+++ b/test/CodeGen/ARM/constantfp.ll
@@ -5,25 +5,25 @@
; RUN: llc -mtriple=thumbv7m -mcpu=cortex-m4 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-NO-XO %s
-; RUN: llc -mtriple=thumbv7m -arm-execute-only -mcpu=cortex-m4 %s -o - \
+; RUN: llc -mtriple=thumbv7m -mattr=+execute-only -mcpu=cortex-m4 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE %s
-; RUN: llc -mtriple=thumbv7meb -arm-execute-only -mcpu=cortex-m4 %s -o - \
+; RUN: llc -mtriple=thumbv7meb -mattr=+execute-only -mcpu=cortex-m4 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE-BE %s
-; RUN: llc -mtriple=thumbv7m -arm-execute-only -mcpu=cortex-m4 -relocation-model=ropi %s -o - \
+; RUN: llc -mtriple=thumbv7m -mattr=+execute-only -mcpu=cortex-m4 -relocation-model=ropi %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-ROPI %s
; RUN: llc -mtriple=thumbv8m.main -mattr=fp-armv8 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-NO-XO %s
-; RUN: llc -mtriple=thumbv8m.main -arm-execute-only -mattr=fp-armv8 %s -o - \
+; RUN: llc -mtriple=thumbv8m.main -mattr=+execute-only -mattr=fp-armv8 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE %s
-; RUN: llc -mtriple=thumbv8m.maineb -arm-execute-only -mattr=fp-armv8 %s -o - \
+; RUN: llc -mtriple=thumbv8m.maineb -mattr=+execute-only -mattr=fp-armv8 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE-BE %s
-; RUN: llc -mtriple=thumbv8m.main -arm-execute-only -mattr=fp-armv8 -relocation-model=ropi %s -o - \
+; RUN: llc -mtriple=thumbv8m.main -mattr=+execute-only -mattr=fp-armv8 -relocation-model=ropi %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-ROPI %s
define arm_aapcs_vfpcc float @test_vmov_f32() {
diff --git a/test/CodeGen/ARM/cortex-a57-misched-basic.ll b/test/CodeGen/ARM/cortex-a57-misched-basic.ll
index 2ec50b9d3343c..cfbef7bd42937 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-basic.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-basic.ll
@@ -8,14 +8,14 @@
; CHECK: ********** MI Scheduling **********
; CHECK: foo:BB#0 entry
-; GENERIC: SDIV
+; GENERIC: LDRi12
; GENERIC: Latency : 1
; GENERIC: EORrr
; GENERIC: Latency : 1
-; GENERIC: LDRi12
-; GENERIC: Latency : 4
; GENERIC: ADDrr
; GENERIC: Latency : 1
+; GENERIC: SDIV
+; GENERIC: Latency : 0
; GENERIC: SUBrr
; GENERIC: Latency : 1
diff --git a/test/CodeGen/ARM/cortexr52-misched-basic.ll b/test/CodeGen/ARM/cortexr52-misched-basic.ll
index eb2c29a3a5d19..614157eb0e105 100644
--- a/test/CodeGen/ARM/cortexr52-misched-basic.ll
+++ b/test/CodeGen/ARM/cortexr52-misched-basic.ll
@@ -12,10 +12,10 @@
; GENERIC: Latency : 1
; R52_SCHED: Latency : 3
; CHECK: MLA
-; GENERIC: Latency : 1
+; GENERIC: Latency : 2
; R52_SCHED: Latency : 4
; CHECK: SDIV
-; GENERIC: Latency : 1
+; GENERIC: Latency : 0
; R52_SCHED: Latency : 8
; CHECK: ** Final schedule for BB#0 ***
; GENERIC: EORrr
diff --git a/test/CodeGen/ARM/ctor_order.ll b/test/CodeGen/ARM/ctor_order.ll
index 7fcc8cba0c8f3..0cf87d7a97b77 100644
--- a/test/CodeGen/ARM/ctor_order.ll
+++ b/test/CodeGen/ARM/ctor_order.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=dynamic-no-pic | FileCheck %s --check-prefix=DARWIN
; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=static | FileCheck %s -check-prefix=DARWIN-STATIC
-; RUN: llc < %s -mtriple=arm-linux-gnu | FileCheck %s -check-prefix=ELF
+; RUN: llc < %s -mtriple=arm-linux-gnu -target-abi=apcs | FileCheck %s -check-prefix=ELF
; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=GNUEABI
; DARWIN: .section __DATA,__mod_init_func,mod_init_funcs
diff --git a/test/CodeGen/ARM/ctors_dtors.ll b/test/CodeGen/ARM/ctors_dtors.ll
index fb94626ab7dd0..c097ade3c846c 100644
--- a/test/CodeGen/ARM/ctors_dtors.ll
+++ b/test/CodeGen/ARM/ctors_dtors.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -mtriple=arm-linux-gnu | FileCheck %s -check-prefix=ELF
+; RUN: llc < %s -mtriple=arm-linux-gnu -target-abi=apcs | FileCheck %s -check-prefix=ELF
; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=GNUEABI
; DARWIN: .section __DATA,__mod_init_func,mod_init_funcs
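
The -target-abi=apcs added here and in ctor_order.ll above (and in ssp-data-layout.ll and str_pre-2.ll below) pins the bare arm-linux-gnu triple to the APCS ABI, presumably so the checked section names and layout stay stable if the triple's default ABI selection changes. The RUN-line pattern, for reference:

; RUN: llc < %s -mtriple=arm-linux-gnu -target-abi=apcs | FileCheck %s -check-prefix=ELF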
diff --git a/test/CodeGen/ARM/cttz.ll b/test/CodeGen/ARM/cttz.ll
index dacfca5059315..cba7be5833100 100644
--- a/test/CodeGen/ARM/cttz.ll
+++ b/test/CodeGen/ARM/cttz.ll
@@ -40,8 +40,8 @@ define i64 @test_i64(i64 %a) {
; CHECK-LABEL: test_i64:
; CHECK: rbit
; CHECK: rbit
-; CHECK: cmp
; CHECK: clz
+; CHECK: cmp
; CHECK: add
; CHECK: clzne
%tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
@@ -81,8 +81,8 @@ define i64 @test_i64_zero_undef(i64 %a) {
; CHECK-LABEL: test_i64_zero_undef:
; CHECK: rbit
; CHECK: rbit
-; CHECK: cmp
; CHECK: clz
+; CHECK: cmp
; CHECK: add
; CHECK: clzne
%tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
diff --git a/test/CodeGen/ARM/cttz_vector.ll b/test/CodeGen/ARM/cttz_vector.ll
index 9480d75db47a6..bed6449804152 100644
--- a/test/CodeGen/ARM/cttz_vector.ll
+++ b/test/CodeGen/ARM/cttz_vector.ll
@@ -168,17 +168,17 @@ define void @test_v4i32(<4 x i32>* %p) {
define void @test_v1i64(<1 x i64>* %p) {
; CHECK-LABEL: test_v1i64:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x0
+; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i64 [[D3:d[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D2]]
-; CHECK: vadd.i64 [[D1]], [[D1]], [[D3]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vpaddl.u8 [[D1]], [[D1]]
-; CHECK: vpaddl.u16 [[D1]], [[D1]]
-; CHECK: vpaddl.u32 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: vand [[D2]], [[D1]], [[D2]]
+; CHECK: vadd.i64 [[D2]], [[D2]], [[D3]]
+; CHECK: vcnt.8 [[D2]], [[D2]]
+; CHECK: vpaddl.u8 [[D2]], [[D2]]
+; CHECK: vpaddl.u16 [[D2]], [[D2]]
+; CHECK: vpaddl.u32 [[D2]], [[D2]]
+; CHECK: vstr [[D2]], [r0]
%a = load <1 x i64>, <1 x i64>* %p
%tmp = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %a, i1 false)
store <1 x i64> %tmp, <1 x i64>* %p
@@ -187,17 +187,17 @@ define void @test_v1i64(<1 x i64>* %p) {
define void @test_v2i64(<2 x i64>* %p) {
; CHECK-LABEL: test_v2i64:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x0
+; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i64 [[Q3:q[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vadd.i64 [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u16 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u32 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: vand [[Q2]], [[Q1]], [[Q2]]
+; CHECK: vadd.i64 [[Q2]], [[Q2]], [[Q3]]
+; CHECK: vcnt.8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u16 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u32 [[Q2]], [[Q2]]
+; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
%a = load <2 x i64>, <2 x i64>* %p
%tmp = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 false)
store <2 x i64> %tmp, <2 x i64>* %p
@@ -346,17 +346,17 @@ define void @test_v4i32_zero_undef(<4 x i32>* %p) {
define void @test_v1i64_zero_undef(<1 x i64>* %p) {
; CHECK-LABEL: test_v1i64_zero_undef:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x0
+; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i64 [[D3:d[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D2]]
-; CHECK: vadd.i64 [[D1]], [[D1]], [[D3]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vpaddl.u8 [[D1]], [[D1]]
-; CHECK: vpaddl.u16 [[D1]], [[D1]]
-; CHECK: vpaddl.u32 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: vand [[D2]], [[D1]], [[D2]]
+; CHECK: vadd.i64 [[D2]], [[D2]], [[D3]]
+; CHECK: vcnt.8 [[D2]], [[D2]]
+; CHECK: vpaddl.u8 [[D2]], [[D2]]
+; CHECK: vpaddl.u16 [[D2]], [[D2]]
+; CHECK: vpaddl.u32 [[D2]], [[D2]]
+; CHECK: vstr [[D2]], [r0]
%a = load <1 x i64>, <1 x i64>* %p
%tmp = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %a, i1 true)
store <1 x i64> %tmp, <1 x i64>* %p
@@ -365,17 +365,17 @@ define void @test_v1i64_zero_undef(<1 x i64>* %p) {
define void @test_v2i64_zero_undef(<2 x i64>* %p) {
; CHECK-LABEL: test_v2i64_zero_undef:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x0
+; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i64 [[Q3:q[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vadd.i64 [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u16 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u32 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: vand [[Q2]], [[Q1]], [[Q2]]
+; CHECK: vadd.i64 [[Q2]], [[Q2]], [[Q3]]
+; CHECK: vcnt.8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u16 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u32 [[Q2]], [[Q2]]
+; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
%a = load <2 x i64>, <2 x i64>* %p
%tmp = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 true)
store <2 x i64> %tmp, <2 x i64>* %p
diff --git a/test/CodeGen/ARM/cxx-tlscc.ll b/test/CodeGen/ARM/cxx-tlscc.ll
index 6a5aa12ac5a64..6a66c5f197ef2 100644
--- a/test/CodeGen/ARM/cxx-tlscc.ll
+++ b/test/CodeGen/ARM/cxx-tlscc.ll
@@ -26,7 +26,7 @@ declare i32 @_tlv_atexit(void (i8*)*, i8*, i8*)
; THUMB-LABEL: _ZTW2sg
; THUMB: push {{.*}}lr
; THUMB: blx
-; THUMB: bne [[TH_end:.?LBB0_[0-9]+]]
+; THUMB: bne{{(.w)?}} [[TH_end:.?LBB0_[0-9]+]]
; THUMB: blx
; THUMB: tlv_atexit
; THUMB: [[TH_end]]:
diff --git a/test/CodeGen/ARM/execute-only-big-stack-frame.ll b/test/CodeGen/ARM/execute-only-big-stack-frame.ll
index 0fe67f9863a58..24c6a06d6af18 100644
--- a/test/CodeGen/ARM/execute-only-big-stack-frame.ll
+++ b/test/CodeGen/ARM/execute-only-big-stack-frame.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mtriple=thumbv7m -arm-execute-only -O0 %s -o - \
+; RUN: llc < %s -mtriple=thumbv7m -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-SUBW-ADDW %s
-; RUN: llc < %s -mtriple=thumbv8m.base -arm-execute-only -O0 %s -o - \
+; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-MOVW-MOVT-ADD %s
-; RUN: llc < %s -mtriple=thumbv8m.main -arm-execute-only -O0 %s -o - \
+; RUN: llc < %s -mtriple=thumbv8m.main -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-SUBW-ADDW %s
define i8 @test_big_stack_frame() {
diff --git a/test/CodeGen/ARM/execute-only-section.ll b/test/CodeGen/ARM/execute-only-section.ll
index 6e1973cd0f144..a3313d8c2f737 100644
--- a/test/CodeGen/ARM/execute-only-section.ll
+++ b/test/CodeGen/ARM/execute-only-section.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7m -arm-execute-only %s -o - | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.base -arm-execute-only %s -o - | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.main -arm-execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7m -mattr=+execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8m.main -mattr=+execute-only %s -o - | FileCheck %s
; CHECK: .section .text,"axy",%progbits,unique,0
; CHECK-NOT: .section
diff --git a/test/CodeGen/ARM/execute-only.ll b/test/CodeGen/ARM/execute-only.ll
index 1f9e8bf2813cb..f8c3d279573ba 100644
--- a/test/CodeGen/ARM/execute-only.ll
+++ b/test/CodeGen/ARM/execute-only.ll
@@ -1,6 +1,6 @@
-; RUN: llc -mtriple=thumbv8m.base-eabi -arm-execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2BASE %s
-; RUN: llc -mtriple=thumbv7m-eabi -arm-execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
-; RUN: llc -mtriple=thumbv8m.main-eabi -arm-execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
+; RUN: llc -mtriple=thumbv8m.base-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2BASE %s
+; RUN: llc -mtriple=thumbv7m-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
+; RUN: llc -mtriple=thumbv8m.main-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
@var = global i32 0
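
These RUN lines, like those in constantfp.ll, execute-only-big-stack-frame.ll, and execute-only-section.ll above, replace the -arm-execute-only flag with the +execute-only subtarget feature. One consequence of the feature form, sketched here via the generic target-features attribute mechanism (this function is illustrative, not from the patch), is that it can be requested per function rather than only per llc invocation:

define i32 @xo_load(i32* %p) "target-features"="+execute-only" {
  %v = load i32, i32* %p   ; execute-only codegen must avoid data in .text (e.g. literal pools)
  ret i32 %v
}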
diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll
index 9148ac109ae38..257d99d119282 100644
--- a/test/CodeGen/ARM/fp16-promote.ll
+++ b/test/CodeGen/ARM/fp16-promote.ll
@@ -687,8 +687,8 @@ define void @test_maxnan(half* %p) #0 {
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-VFP-LIBCALL: vbsl
-; CHECK-NOVFP: bic
; CHECK-NOVFP: and
+; CHECK-NOVFP: bic
; CHECK-NOVFP: orr
; CHECK-LIBCALL: bl __aeabi_f2h
define void @test_copysign(half* %p, half* %q) #0 {
@@ -818,25 +818,24 @@ define void @test_fmuladd(half* %p, half* %q, half* %r) #0 {
; CHECK-ALL-LABEL: test_insertelement:
; CHECK-ALL: sub sp, sp, #8
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: mov
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: mov
; CHECK-ALL-DAG: ldrh
; CHECK-ALL-DAG: orr
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: ldrh
+; CHECK-ALL-DAG: ldrh
+; CHECK-ALL-DAG: ldrh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
; CHECK-ALL: add sp, sp, #8
define void @test_insertelement(half* %p, <4 x half>* %q, i32 %i) #0 {
%a = load half, half* %p, align 2
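
The rewrite above leans on FileCheck's CHECK-DAG semantics: a consecutive run of CHECK-DAG directives may match in any order before the next plain CHECK, whereas CHECK-NEXT requires a match on the immediately following line, so DAG is the right tool once the ldrh/strh schedule is no longer deterministic. A hypothetical minimal pattern:

; CHECK: test_insertelement:
; CHECK-DAG: ldrh
; CHECK-DAG: strh
; CHECK: add sp, sp, #8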
diff --git a/test/CodeGen/ARM/fp16-v3.ll b/test/CodeGen/ARM/fp16-v3.ll
index a37f71d9ba881..e84fee2c2e1b5 100644
--- a/test/CodeGen/ARM/fp16-v3.ll
+++ b/test/CodeGen/ARM/fp16-v3.ll
@@ -11,8 +11,8 @@ target triple = "armv7a--none-eabi"
; CHECK: vadd.f32 [[SREG5:s[0-9]+]], [[SREG4]], [[SREG1]]
; CHECK-NEXT: vcvtb.f16.f32 [[SREG6:s[0-9]+]], [[SREG5]]
; CHECK-NEXT: vmov [[RREG1:r[0-9]+]], [[SREG6]]
-; CHECK-NEXT: uxth [[RREG2:r[0-9]+]], [[RREG1]]
-; CHECK-NEXT: pkhbt [[RREG3:r[0-9]+]], [[RREG1]], [[RREG1]], lsl #16
+; CHECK-DAG: uxth [[RREG2:r[0-9]+]], [[RREG1]]
+; CHECK-DAG: pkhbt [[RREG3:r[0-9]+]], [[RREG1]], [[RREG1]], lsl #16
; CHECK-DAG: strh [[RREG1]], [r0, #4]
; CHECK-DAG: vmov [[DREG:d[0-9]+]], [[RREG3]], [[RREG2]]
; CHECK-DAG: vst1.32 {[[DREG]][0]}, [r0:32]
diff --git a/test/CodeGen/ARM/ifcvt7.ll b/test/CodeGen/ARM/ifcvt7.ll
index e0d2b7cffb442..ed443a1814e62 100644
--- a/test/CodeGen/ARM/ifcvt7.ll
+++ b/test/CodeGen/ARM/ifcvt7.ll
@@ -5,8 +5,6 @@
define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
; CHECK: cmpeq
-; CHECK: moveq
-; CHECK: popeq
entry:
br label %tailrecurse
diff --git a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
index 74117d3896bdc..a633c0291c60a 100644
--- a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -55,8 +55,8 @@ define void @i24_and_or(i24* %a) {
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; LE-LABEL: i24_insert_bit:
; LE: @ BB#0:
-; LE-NEXT: ldrh r2, [r0]
; LE-NEXT: mov r3, #255
+; LE-NEXT: ldrh r2, [r0]
; LE-NEXT: orr r3, r3, #57088
; LE-NEXT: and r2, r2, r3
; LE-NEXT: orr r1, r2, r1, lsl #13
@@ -99,8 +99,8 @@ define void @i56_or(i56* %a) {
; BE-NEXT: orr r2, r3, r2, lsl #8
; BE-NEXT: orr r2, r2, r12, lsl #24
; BE-NEXT: orr r2, r2, #384
-; BE-NEXT: lsr r3, r2, #8
; BE-NEXT: strb r2, [r1, #2]
+; BE-NEXT: lsr r3, r2, #8
; BE-NEXT: strh r3, [r1]
; BE-NEXT: bic r1, r12, #255
; BE-NEXT: orr r1, r1, r2, lsr #24
@@ -127,8 +127,8 @@ define void @i56_and_or(i56* %a) {
; BE-NEXT: mov r3, #128
; BE-NEXT: ldrh r2, [r1, #4]!
; BE-NEXT: strb r3, [r1, #2]
-; BE-NEXT: lsl r2, r2, #8
; BE-NEXT: ldr r12, [r0]
+; BE-NEXT: lsl r2, r2, #8
; BE-NEXT: orr r2, r2, r12, lsl #24
; BE-NEXT: orr r2, r2, #384
; BE-NEXT: lsr r3, r2, #8
diff --git a/test/CodeGen/ARM/indirectbr.ll b/test/CodeGen/ARM/indirectbr.ll
index 90defad43a7d8..a3ec2a7f3e777 100644
--- a/test/CodeGen/ARM/indirectbr.ll
+++ b/test/CodeGen/ARM/indirectbr.ll
@@ -56,9 +56,11 @@ L2: ; preds = %L3, %bb2
L1: ; preds = %L2, %bb2
%res.3 = phi i32 [ %phitmp, %L2 ], [ 2, %bb2 ] ; <i32> [#uses=1]
; ARM-LABEL: %L1
+; ARM: ldr [[R_NEXTADDR:r[0-9]+]], LCPI
; ARM: ldr [[R1:r[0-9]+]], LCPI
+; ARM: add [[R_NEXTADDR_b:r[0-9]+]], pc, [[R_NEXTADDR]]
; ARM: add [[R1b:r[0-9]+]], pc, [[R1]]
-; ARM: str [[R1b]]
+; ARM: str [[R1b]], {{\[}}[[R_NEXTADDR_b]]]
; THUMB-LABEL: %L1
; THUMB: ldr [[R2:r[0-9]+]], LCPI
diff --git a/test/CodeGen/ARM/jump-table-islands.ll b/test/CodeGen/ARM/jump-table-islands.ll
index 6b4f174c09288..755ca30199ad1 100644
--- a/test/CodeGen/ARM/jump-table-islands.ll
+++ b/test/CodeGen/ARM/jump-table-islands.ll
@@ -13,7 +13,7 @@ define %BigInt @test_moved_jumptable(i1 %tst, i32 %sw, %BigInt %l) {
; CHECK: .long LBB{{[0-9]+_[0-9]+}}-[[JUMP_TABLE]]
; CHECK: [[SKIP_TABLE]]:
-; CHECK: add pc, {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: add pc, {{r[0-9]+|lr}}, {{r[0-9]+|lr}}
br i1 %tst, label %simple, label %complex
simple:
diff --git a/test/CodeGen/ARM/jump-table-tbh.ll b/test/CodeGen/ARM/jump-table-tbh.ll
index 2da8a5fafc408..b3ee68ea0758a 100644
--- a/test/CodeGen/ARM/jump-table-tbh.ll
+++ b/test/CodeGen/ARM/jump-table-tbh.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumbv7m-linux-gnu -o - %s | FileCheck %s --check-prefix=T2
-; RUN: llc -mtriple=thumbv6m-linux-gnu -o - %s | FileCheck %s --check-prefix=T1
+; RUN: llc -mtriple=thumbv7m-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=T2
+; RUN: llc -mtriple=thumbv6m-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=T1
declare void @foo(double)
declare i32 @llvm.arm.space(i32, i32)
@@ -10,7 +10,7 @@ define i32 @test_tbh(i1 %tst, i32 %sw, i32 %l) {
; T2-LABEL: test_tbh:
; T2: [[ANCHOR:.LCPI[0-9_]+]]:
; T2: tbh [pc, r{{[0-9]+}}, lsl #1]
-; T2-NEXT: @ BB#1
+; T2-NEXT: @ BB#{{[0-9]+}}
; T2-NEXT: LJTI
; T2-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2
; T2-NEXT: .short (.LBB0_{{[0-9]+}}-([[ANCHOR]]+4))/2
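
-verify-machineinstrs makes llc run the machine-code verifier over the generated MIR, so a malformed jump-table sequence fails the test loudly rather than silently producing bad output. The flag slots into any RUN line; a hypothetical example:

; RUN: llc -mtriple=armv7-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s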
diff --git a/test/CodeGen/ARM/ldm-stm-i256.ll b/test/CodeGen/ARM/ldm-stm-i256.ll
index 7b4151dabf6dd..151c42e0e1585 100644
--- a/test/CodeGen/ARM/ldm-stm-i256.ll
+++ b/test/CodeGen/ARM/ldm-stm-i256.ll
@@ -17,22 +17,24 @@ entry:
%add6 = add nsw i256 %or, %d
store i256 %add6, i256* %b, align 8
ret void
- ; CHECK-DAG: ldm r3
; CHECK-DAG: ldm r2
- ; CHECK-DAG: ldr {{.*}}, [r3, #20]
+ ; CHECK-DAG: ldr {{.*}}, [r3]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #4]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #8]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #12]
; CHECK-DAG: ldr {{.*}}, [r3, #16]
- ; CHECK-DAG: ldr {{.*}}, [r3, #28]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #20]
; CHECK-DAG: ldr {{.*}}, [r3, #24]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #28]
; CHECK-DAG: ldr {{.*}}, [r2, #20]
- ; CHECK-DAG: ldr {{.*}}, [r2, #16]
- ; CHECK-DAG: ldr {{.*}}, [r2, #28]
; CHECK-DAG: ldr {{.*}}, [r2, #24]
- ; CHECK-DAG: stmib r0
- ; CHECK-DAG: str {{.*}}, [r0]
+ ; CHECK-DAG: ldr {{.*}}, [r2, #28]
+ ; CHECK-DAG: stm r0
+ ; CHECK-DAG: str {{.*}}, [r0, #20]
; CHECK-DAG: str {{.*}}, [r0, #24]
; CHECK-DAG: str {{.*}}, [r0, #28]
- ; CHECK-DAG: str {{.*}}, [r1]
- ; CHECK-DAG: stmib r1
+ ; CHECK-DAG: stm r1
+ ; CHECK-DAG: str {{.*}}, [r1, #20]
; CHECK-DAG: str {{.*}}, [r1, #24]
; CHECK-DAG: str {{.*}}, [r1, #28]
}
diff --git a/test/CodeGen/ARM/legalize-unaligned-load.ll b/test/CodeGen/ARM/legalize-unaligned-load.ll
index eb4e942f07422..ccf93c3ef55e6 100644
--- a/test/CodeGen/ARM/legalize-unaligned-load.ll
+++ b/test/CodeGen/ARM/legalize-unaligned-load.ll
@@ -10,7 +10,7 @@
; CHECK-NOT: str
; CHECK: ldr
; CHECK: str
-; CHECK: bx
+; CHECK: {{bx|pop.*pc}}
define i32 @get_set_complex({ float, float }* noalias nocapture %retptr,
{ i8*, i32 }** noalias nocapture readnone %excinfo,
i8* noalias nocapture readnone %env,
diff --git a/test/CodeGen/ARM/long-setcc.ll b/test/CodeGen/ARM/long-setcc.ll
index f09167ed9e781..1fbc3f2c08388 100644
--- a/test/CodeGen/ARM/long-setcc.ll
+++ b/test/CodeGen/ARM/long-setcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi < %s | FileCheck %s
define i1 @t1(i64 %x) {
%B = icmp slt i64 %x, 0
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index 3ec5fa41aa6f9..cf8396db9db54 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -28,15 +28,15 @@ define i32 @f1(i64 %x, i64 %y) {
define i32 @f2(i64 %x, i64 %y) {
; CHECK-LABEL: f2:
-; CHECK-LE: lsr{{.*}}r2
-; CHECK-LE-NEXT: rsb r3, r2, #32
+; CHECK-LE: rsb r3, r2, #32
+; CHECK-LE-NEXT: lsr{{.*}}r2
; CHECK-LE-NEXT: sub r2, r2, #32
; CHECK-LE-NEXT: orr r0, r0, r1, lsl r3
; CHECK-LE-NEXT: cmp r2, #0
; CHECK-LE-NEXT: asrge r0, r1, r2
-; CHECK-BE: lsr{{.*}}r3
-; CHECK-BE-NEXT: rsb r2, r3, #32
+; CHECK-BE: rsb r2, r3, #32
+; CHECK-BE-NEXT: lsr{{.*}}r3
; CHECK-BE-NEXT: orr r1, r1, r0, lsl r2
; CHECK-BE-NEXT: sub r2, r3, #32
; CHECK-BE-NEXT: cmp r2, #0
@@ -49,15 +49,15 @@ define i32 @f2(i64 %x, i64 %y) {
define i32 @f3(i64 %x, i64 %y) {
; CHECK-LABEL: f3:
-; CHECK-LE: lsr{{.*}}r2
-; CHECK-LE-NEXT: rsb r3, r2, #32
+; CHECK-LE: rsb r3, r2, #32
+; CHECK-LE-NEXT: lsr{{.*}}r2
; CHECK-LE-NEXT: sub r2, r2, #32
; CHECK-LE-NEXT: orr r0, r0, r1, lsl r3
; CHECK-LE-NEXT: cmp r2, #0
; CHECK-LE-NEXT: lsrge r0, r1, r2
-; CHECK-BE: lsr{{.*}}r3
-; CHECK-BE-NEXT: rsb r2, r3, #32
+; CHECK-BE: rsb r2, r3, #32
+; CHECK-BE-NEXT: lsr{{.*}}r3
; CHECK-BE-NEXT: orr r1, r1, r0, lsl r2
; CHECK-BE-NEXT: sub r2, r3, #32
; CHECK-BE-NEXT: cmp r2, #0
diff --git a/test/CodeGen/ARM/misched-fusion-aes.ll b/test/CodeGen/ARM/misched-fusion-aes.ll
index d3558ab4abb07..483f26cc8e007 100644
--- a/test/CodeGen/ARM/misched-fusion-aes.ll
+++ b/test/CodeGen/ARM/misched-fusion-aes.ll
@@ -74,15 +74,16 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QA]]
; CHECK: aese.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QB]]
+; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QC]]
; CHECK: aese.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QD]]
; CHECK: aese.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QE]]
+; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QF]]
-; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QG]]
; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
@@ -159,15 +160,16 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QA]]
; CHECK: aesd.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QB]]
+; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QC]]
; CHECK: aesd.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QD]]
; CHECK: aesd.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QE]]
+; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QF]]
-; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QG]]
; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
diff --git a/test/CodeGen/ARM/select_const.ll b/test/CodeGen/ARM/select_const.ll
index 48fe572bf8a72..23de9c35a5b81 100644
--- a/test/CodeGen/ARM/select_const.ll
+++ b/test/CodeGen/ARM/select_const.ll
@@ -281,16 +281,16 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) {
; CHECK: @ BB#0:
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
-; CHECK-NEXT: ands r12, r0, #1
; CHECK-NEXT: mov lr, #1
+; CHECK-NEXT: ands r12, r0, #1
; CHECK-NEXT: mov r0, #23
-; CHECK-NEXT: eor r3, r3, #1
; CHECK-NEXT: orr lr, lr, #65536
; CHECK-NEXT: mvnne r0, #3
-; CHECK-NEXT: movne r12, #1
; CHECK-NEXT: and r4, r0, lr
-; CHECK-NEXT: eor r2, r2, lr
+; CHECK-NEXT: movne r12, #1
; CHECK-NEXT: subs r0, r4, #1
+; CHECK-NEXT: eor r2, r2, lr
+; CHECK-NEXT: eor r3, r3, #1
; CHECK-NEXT: sbc r1, r12, #0
; CHECK-NEXT: orrs r2, r2, r3
; CHECK-NEXT: movne r0, r4
diff --git a/test/CodeGen/ARM/shift-i64.ll b/test/CodeGen/ARM/shift-i64.ll
index 12cc5fbe03e47..3644afa17ca4a 100644
--- a/test/CodeGen/ARM/shift-i64.ll
+++ b/test/CodeGen/ARM/shift-i64.ll
@@ -29,8 +29,8 @@ define i64 @test_shl(i64 %val, i64 %amt) {
; Explanation for lshr is pretty much the reverse of shl.
define i64 @test_lshr(i64 %val, i64 %amt) {
; CHECK-LABEL: test_lshr:
-; CHECK: lsr r0, r0, r2
; CHECK: rsb [[REVERSE_SHIFT:.*]], r2, #32
+; CHECK: lsr r0, r0, r2
; CHECK: orr r0, r0, r1, lsl [[REVERSE_SHIFT]]
; CHECK: sub [[EXTRA_SHIFT:.*]], r2, #32
; CHECK: cmp [[EXTRA_SHIFT]], #0
diff --git a/test/CodeGen/ARM/ssp-data-layout.ll b/test/CodeGen/ARM/ssp-data-layout.ll
index 92fa0809ed2df..39c279eb90d47 100644
--- a/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/test/CodeGen/ARM/ssp-data-layout.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -march=arm -mcpu=cortex-a8 -mtriple arm-linux-gnu -o - | FileCheck %s
+; RUN: llc < %s -disable-fp-elim -march=arm -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
; This test is fairly fragile. The goal is to ensure that "large" stack
; objects are allocated closest to the stack protector (i.e., farthest away
; from the Stack Pointer). In standard SSP mode this means that large (>=
diff --git a/test/CodeGen/ARM/str_pre-2.ll b/test/CodeGen/ARM/str_pre-2.ll
index 4b8b4c6bca724..1c6c05de2579d 100644
--- a/test/CodeGen/ARM/str_pre-2.ll
+++ b/test/CodeGen/ARM/str_pre-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv6-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=armv6-linux-gnu -target-abi=apcs | FileCheck %s
@b = external global i64*
diff --git a/test/CodeGen/ARM/swifterror.ll b/test/CodeGen/ARM/swifterror.ll
index 3fd57c592bfb6..b02adf7912b56 100644
--- a/test/CodeGen/ARM/swifterror.ll
+++ b/test/CodeGen/ARM/swifterror.ll
@@ -420,10 +420,10 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
; CHECK-ARMV7-DAG: str r8, [s[[STK1:.*]]]
; CHECK-ARMV7-DAG: str r10, [s[[STK2:.*]]]
; Store arguments.
-; CHECK-ARMV7: mov r6, r3
-; CHECK-ARMV7: mov r4, r2
-; CHECK-ARMV7: mov r11, r1
-; CHECK-ARMV7: mov r5, r0
+; CHECK-ARMV7-DAG: mov r6, r3
+; CHECK-ARMV7-DAG: mov r4, r2
+; CHECK-ARMV7-DAG: mov r11, r1
+; CHECK-ARMV7-DAG: mov r5, r0
; Setup call.
; CHECK-ARMV7: mov r0, #1
; CHECK-ARMV7: mov r1, #2
@@ -435,10 +435,10 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
; Restore original arguments.
; CHECK-ARMV7-DAG: ldr r10, [s[[STK2]]]
; CHECK-ARMV7-DAG: ldr r8, [s[[STK1]]]
-; CHECK-ARMV7: mov r0, r5
-; CHECK-ARMV7: mov r1, r11
-; CHECK-ARMV7: mov r2, r4
-; CHECK-ARMV7: mov r3, r6
+; CHECK-ARMV7-DAG: mov r0, r5
+; CHECK-ARMV7-DAG: mov r1, r11
+; CHECK-ARMV7-DAG: mov r2, r4
+; CHECK-ARMV7-DAG: mov r3, r6
; CHECK-ARMV7: bl _params_in_reg2
; CHECK-ARMV7: pop {r4, r5, r6, r7, r10, r11, pc}
define swiftcc void @params_in_reg(i32, i32, i32, i32, i8* swiftself, %swift_error** nocapture swifterror %err) {
@@ -469,25 +469,25 @@ declare swiftcc void @params_in_reg2(i32, i32, i32, i32, i8* swiftself, %swift_e
; CHECK-ARMV7: mov r8, #0
; CHECK-ARMV7: bl _params_in_reg2
; Restore original arguments.
-; CHECK-ARMV7: ldr r3, [s[[STK2]]]
-; CHECK-ARMV7: ldr r10, [s[[STK1]]]
+; CHECK-ARMV7-DAG: ldr r3, [s[[STK2]]]
+; CHECK-ARMV7-DAG: ldr r10, [s[[STK1]]]
; Store %error_ptr_ref;
-; CHECK-ARMV7: str r8, [s[[STK3:.*]]]
+; CHECK-ARMV7-DAG: str r8, [s[[STK3:.*]]]
; Restore original arguments.
-; CHECK-ARMV7: mov r0, r5
-; CHECK-ARMV7: mov r1, r11
-; CHECK-ARMV7: mov r2, r4
-; CHECK-ARMV7: mov r8, r6
+; CHECK-ARMV7-DAG: mov r0, r5
+; CHECK-ARMV7-DAG: mov r1, r11
+; CHECK-ARMV7-DAG: mov r2, r4
+; CHECK-ARMV7-DAG: mov r8, r6
; CHECK-ARMV7: bl _params_and_return_in_reg2
; Store swifterror return %err;
-; CHECK-ARMV7: str r8, [s[[STK1]]]
+; CHECK-ARMV7-DAG: str r8, [s[[STK1]]]
; Load swifterror value %error_ptr_ref.
-; CHECK-ARMV7: ldr r8, [s[[STK3]]]
+; CHECK-ARMV7-DAG: ldr r8, [s[[STK3]]]
; Save return values.
-; CHECK-ARMV7: mov r4, r0
-; CHECK-ARMV7: mov r5, r1
-; CHECK-ARMV7: mov r6, r2
-; CHECK-ARMV7: mov r11, r3
+; CHECK-ARMV7-DAG: mov r4, r0
+; CHECK-ARMV7-DAG: mov r5, r1
+; CHECK-ARMV7-DAG: mov r6, r2
+; CHECK-ARMV7-DAG: mov r11, r3
; Setup call.
; CHECK-ARMV7: mov r0, #1
; CHECK-ARMV7: mov r1, #2
@@ -496,12 +496,12 @@ declare swiftcc void @params_in_reg2(i32, i32, i32, i32, i8* swiftself, %swift_e
; CHECK-ARMV7: mov r10, #0
; CHECK-ARMV7: bl _params_in_reg2
; Load swifterror %err;
-; CHECK-ARMV7: ldr r8, [s[[STK1]]]
+; CHECK-ARMV7-DAG: ldr r8, [s[[STK1]]]
; Restore return values for returning.
-; CHECK-ARMV7: mov r0, r4
-; CHECK-ARMV7: mov r1, r5
-; CHECK-ARMV7: mov r2, r6
-; CHECK-ARMV7: mov r3, r11
+; CHECK-ARMV7-DAG: mov r0, r4
+; CHECK-ARMV7-DAG: mov r1, r5
+; CHECK-ARMV7-DAG: mov r2, r6
+; CHECK-ARMV7-DAG: mov r3, r11
; CHECK-ARMV7: pop {r4, r5, r6, r7, r10, r11, pc}
define swiftcc { i32, i32, i32, i32} @params_and_return_in_reg(i32, i32, i32, i32, i8* swiftself, %swift_error** nocapture swifterror %err) {
%error_ptr_ref = alloca swifterror %swift_error*, align 8
diff --git a/test/CodeGen/ARM/thumb2-it-block.ll b/test/CodeGen/ARM/thumb2-it-block.ll
index aaefc0a148639..6d93869ec10fd 100644
--- a/test/CodeGen/ARM/thumb2-it-block.ll
+++ b/test/CodeGen/ARM/thumb2-it-block.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumbv8 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv8 < %s | FileCheck %s
; PR11107
define i32 @test(i32 %a, i32 %b) {
diff --git a/test/CodeGen/ARM/vcgt.ll b/test/CodeGen/ARM/vcgt.ll
index c39c939d6c955..1e68ff13699aa 100644
--- a/test/CodeGen/ARM/vcgt.ll
+++ b/test/CodeGen/ARM/vcgt.ll
@@ -162,8 +162,8 @@ define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
; rdar://7923010
define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vcgt_zext:
-;CHECK: vmov.i32 [[Q0:q[0-9]+]], #0x1
-;CHECK: vcgt.f32 [[Q1:q[0-9]+]]
+;CHECK-DAG: vmov.i32 [[Q0:q[0-9]+]], #0x1
+;CHECK-DAG: vcgt.f32 [[Q1:q[0-9]+]]
;CHECK: vand [[Q2:q[0-9]+]], [[Q1]], [[Q0]]
%tmp1 = load <4 x float>, <4 x float>* %A
%tmp2 = load <4 x float>, <4 x float>* %B
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 2ef2a0697ec95..8623d2c164bae 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -237,14 +237,14 @@ entry:
; illegal type to a legal type.
define <2 x i8> @test_truncate(<2 x i128> %in) {
; CHECK-LABEL: test_truncate:
-; CHECK: mov [[BASE:r[0-9]+]], sp
-; CHECK-NEXT: vld1.32 {[[REG1:d[0-9]+]][0]}, {{\[}}[[BASE]]:32]
-; CHECK-NEXT: add [[BASE2:r[0-9]+]], [[BASE]], #4
-; CHECK-NEXT: vld1.32 {[[REG1]][1]}, {{\[}}[[BASE2]]:32]
; REG2 should map to the same Q register as REG1, i.e., REG2 = REG1 - 1, but we
; cannot express that.
-; CHECK-NEXT: vmov.32 [[REG2:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r0
+; CHECK-NEXT: mov [[BASE:r[0-9]+]], sp
+; CHECK-NEXT: vld1.32 {[[REG1:d[0-9]+]][0]}, {{\[}}[[BASE]]:32]
+; CHECK-NEXT: add [[BASE2:r[0-9]+]], [[BASE]], #4
; CHECK-NEXT: vmov.32 [[REG2]][1], r1
+; CHECK-NEXT: vld1.32 {[[REG1]][1]}, {{\[}}[[BASE2]]:32]
; The Q register used here should match floor(REG1/2), but we cannot express that.
; CHECK-NEXT: vmovn.i64 [[RES:d[0-9]+]], q{{[0-9]+}}
; CHECK-NEXT: vmov r0, r1, [[RES]]
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index 5742dc314978f..5b524145be760 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -182,9 +182,9 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_interleaved:
; CHECK: @ BB#0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vext.16 d16, d16, d17, #3
; CHECK-NEXT: vorr d17, d16, d16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.16 d16, d17
; CHECK-NEXT: vzip.16 d16, d18
; CHECK-NEXT: vmov r0, r1, d16
@@ -217,16 +217,16 @@ define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
; CHECK-LABEL: test_multisource:
; CHECK: @ BB#0:
; CHECK-NEXT: mov r1, r0
-; CHECK-NEXT: add r2, r0, #32
-; CHECK-NEXT: add r0, r0, #48
+; CHECK-NEXT: add r2, r0, #48
+; CHECK-NEXT: add r0, r0, #32
; CHECK-NEXT: vld1.16 {d16, d17}, [r1:128]!
-; CHECK-NEXT: vld1.64 {d20, d21}, [r2:128]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r0:128]
-; CHECK-NEXT: vld1.64 {d22, d23}, [r1:128]
+; CHECK-NEXT: vld1.64 {d20, d21}, [r0:128]
; CHECK-NEXT: vorr d24, d20, d20
+; CHECK-NEXT: vld1.64 {d18, d19}, [r2:128]
+; CHECK-NEXT: vld1.64 {d22, d23}, [r1:128]
; CHECK-NEXT: vzip.16 d24, d18
-; CHECK-NEXT: vext.16 d18, d20, d24, #2
; CHECK-NEXT: vtrn.16 q8, q11
+; CHECK-NEXT: vext.16 d18, d20, d24, #2
; CHECK-NEXT: vext.16 d16, d18, d16, #2
; CHECK-NEXT: vext.16 d16, d16, d16, #2
; CHECK-NEXT: vmov r0, r1, d16
@@ -259,24 +259,24 @@ define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_illegal:
; CHECK: @ BB#0:
-; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
-; CHECK-NEXT: vmov.u16 r1, d16[0]
-; CHECK-NEXT: vmov.u16 r0, d17[3]
-; CHECK-NEXT: vorr d22, d16, d16
-; CHECK-NEXT: vorr d23, d16, d16
-; CHECK-NEXT: vmov.16 d20[0], r1
-; CHECK-NEXT: vuzp.16 d22, d23
-; CHECK-NEXT: vmov.u16 r1, d17[1]
-; CHECK-NEXT: vmov.16 d20[1], r0
-; CHECK-NEXT: vuzp.16 d22, d18
-; CHECK-NEXT: vmov.16 d20[2], r1
-; CHECK-NEXT: vmov.u16 r0, d19[1]
-; CHECK-NEXT: vext.16 d21, d16, d18, #3
-; CHECK-NEXT: vmov.16 d20[3], r0
-; CHECK-NEXT: vmov r0, r1, d20
-; CHECK-NEXT: vmov r2, r3, d21
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vorr d22, d16, d16
+; CHECK-NEXT: vmov.u16 r0, d16[0]
+; CHECK-NEXT: vorr d23, d16, d16
+; CHECK-NEXT: vmov.u16 r2, d17[3]
+; CHECK-NEXT: vmov.u16 r3, d17[1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT: vmov.u16 r1, d19[1]
+; CHECK-NEXT: vuzp.16 d22, d23
+; CHECK-NEXT: vuzp.16 d22, d18
+; CHECK-NEXT: vmov.16 d20[0], r0
+; CHECK-NEXT: vmov.16 d20[1], r2
+; CHECK-NEXT: vmov.16 d20[2], r3
+; CHECK-NEXT: vmov.16 d20[3], r1
+; CHECK-NEXT: vext.16 d21, d16, d18, #3
+; CHECK-NEXT: vmov r0, r1, d20
+; CHECK-NEXT: vmov r2, r3, d21
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
@@ -289,10 +289,10 @@ define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>
; CHECK-LABEL: test_elem_mismatch:
; CHECK: @ BB#0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0:128]
-; CHECK-NEXT: vmov.32 r2, d16[0]
-; CHECK-NEXT: vmov.32 r0, d17[0]
-; CHECK-NEXT: vmov.16 d16[0], r2
-; CHECK-NEXT: vmov.16 d16[1], r0
+; CHECK-NEXT: vmov.32 r0, d16[0]
+; CHECK-NEXT: vmov.32 r2, d17[0]
+; CHECK-NEXT: vmov.16 d16[0], r0
+; CHECK-NEXT: vmov.16 d16[1], r2
; CHECK-NEXT: vstr d16, [r1]
; CHECK-NEXT: mov pc, lr
%tmp0 = load <2 x i64>, <2 x i64>* %src, align 16
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 03c0354aa1df9..8fa5113d8a31f 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -40,8 +40,8 @@ define void @test_add(float* %P, double* %D) {
define void @test_ext_round(float* %P, double* %D) {
;CHECK-LABEL: test_ext_round:
%a = load float, float* %P ; <float> [#uses=1]
-;CHECK: vcvt.f64.f32
-;CHECK: vcvt.f32.f64
+;CHECK-DAG: vcvt.f64.f32
+;CHECK-DAG: vcvt.f32.f64
%b = fpext float %a to double ; <double> [#uses=1]
%A = load double, double* %D ; <double> [#uses=1]
%B = fptrunc double %A to float ; <float> [#uses=1]
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index bdb3847697410..c50e0beea4d1e 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -78,7 +78,7 @@ define <16 x i8> @vld1Qi8(i8* %A) nounwind {
;Check for a post-increment updating load.
define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
;CHECK-LABEL: vld1Qi8_update:
-;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+}}:64]!
+;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+|lr}}:64]!
%A = load i8*, i8** %ptr
%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %A, i32 8)
%tmp2 = getelementptr i8, i8* %A, i32 16
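
This file and the vld2/vld3/vld4/vlddup tests below all loosen hard-coded base registers to {{r[0-9]+|lr}}. The {{...}} wrapper is FileCheck regex syntax, and the alternation accepts either a numbered GPR or lr, presumably because register allocation is now free to pick lr as the pointer. The relaxed form, for reference:

;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+|lr}}:64]!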
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index 1ca16587bd911..6ef37c1b66782 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -14,7 +14,7 @@
define <8 x i8> @vld2i8(i8* %A) nounwind {
;CHECK-LABEL: vld2i8:
;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld2.8 {d16, d17}, [r0:64]
+;CHECK: vld2.8 {d16, d17}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8.p0i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
@@ -25,7 +25,7 @@ define <8 x i8> @vld2i8(i8* %A) nounwind {
define <4 x i16> @vld2i16(i16* %A) nounwind {
;CHECK-LABEL: vld2i16:
;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld2.16 {d16, d17}, [r0:128]
+;CHECK: vld2.16 {d16, d17}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16.p0i8(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
@@ -59,7 +59,7 @@ define <2 x float> @vld2f(float* %A) nounwind {
;Check for a post-increment updating load.
define <2 x float> @vld2f_update(float** %ptr) nounwind {
;CHECK-LABEL: vld2f_update:
-;CHECK: vld2.32 {d16, d17}, [r1]!
+;CHECK: vld2.32 {d16, d17}, [{{r[0-9]+|lr}}]!
%A = load float*, float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32.p0i8(i8* %tmp0, i32 1)
@@ -74,7 +74,7 @@ define <2 x float> @vld2f_update(float** %ptr) nounwind {
define <1 x i64> @vld2i64(i64* %A) nounwind {
;CHECK-LABEL: vld2i64:
;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld1.64 {d16, d17}, [r0:128]
+;CHECK: vld1.64 {d16, d17}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64.p0i8(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 0
@@ -86,7 +86,7 @@ define <1 x i64> @vld2i64(i64* %A) nounwind {
define <16 x i8> @vld2Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld2Qi8:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld2.8 {d16, d17, d18, d19}, [r0:64]
+;CHECK: vld2.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8.p0i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
@@ -97,7 +97,7 @@ define <16 x i8> @vld2Qi8(i8* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld2Qi8_update:
-;CHECK: vld2.8 {d16, d17, d18, d19}, [r2:128], r1
+;CHECK: vld2.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128], r1
%A = load i8*, i8** %ptr
%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8.p0i8(i8* %A, i32 16)
%tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
@@ -111,7 +111,7 @@ define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
define <8 x i16> @vld2Qi16(i16* %A) nounwind {
;CHECK-LABEL: vld2Qi16:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld2.16 {d16, d17, d18, d19}, [r0:128]
+;CHECK: vld2.16 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16.p0i8(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 0
@@ -123,7 +123,7 @@ define <8 x i16> @vld2Qi16(i16* %A) nounwind {
define <4 x i32> @vld2Qi32(i32* %A) nounwind {
;CHECK-LABEL: vld2Qi32:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld2.32 {d16, d17, d18, d19}, [r0:256]
+;CHECK: vld2.32 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32.p0i8(i8* %tmp0, i32 64)
%tmp2 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 0
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index c3e8ee8691fd7..0eaad0f900354 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -15,7 +15,7 @@
define <8 x i8> @vld3i8(i8* %A) nounwind {
;CHECK-LABEL: vld3i8:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld3.8 {d16, d17, d18}, [r0:64]
+;CHECK: vld3.8 {d16, d17, d18}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A, i32 32)
%tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2
@@ -37,7 +37,7 @@ define <4 x i16> @vld3i16(i16* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <4 x i16> @vld3i16_update(i16** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld3i16_update:
-;CHECK: vld3.16 {d16, d17, d18}, [{{r[0-9]+}}], {{r[0-9]+}}
+;CHECK: vld3.16 {d16, d17, d18}, [{{r[0-9]+|lr}}], {{r[0-9]+|lr}}
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16.p0i8(i8* %tmp0, i32 1)
@@ -74,7 +74,7 @@ define <2 x float> @vld3f(float* %A) nounwind {
define <1 x i64> @vld3i64(i64* %A) nounwind {
;CHECK-LABEL: vld3i64:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld1.64 {d16, d17, d18}, [r0:64]
+;CHECK: vld1.64 {d16, d17, d18}, [{{r[0-9]+|lr}}:64]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64.p0i8(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 0
@@ -85,7 +85,7 @@ define <1 x i64> @vld3i64(i64* %A) nounwind {
define <1 x i64> @vld3i64_update(i64** %ptr, i64* %A) nounwind {
;CHECK-LABEL: vld3i64_update:
-;CHECK: vld1.64 {d16, d17, d18}, [r1:64]!
+;CHECK: vld1.64 {d16, d17, d18}, [{{r[0-9]+|lr}}:64]!
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64.p0i8(i8* %tmp0, i32 16)
%tmp5 = getelementptr i64, i64* %A, i32 3
@@ -99,8 +99,8 @@ define <1 x i64> @vld3i64_update(i64** %ptr, i64* %A) nounwind {
define <16 x i8> @vld3Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld3Qi8:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld3.8 {d16, d18, d20}, [r0:64]!
-;CHECK: vld3.8 {d17, d19, d21}, [r0:64]
+;CHECK: vld3.8 {d16, d18, d20}, [{{r[0-9]+|lr}}:64]!
+;CHECK: vld3.8 {d17, d19, d21}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8.p0i8(i8* %A, i32 32)
%tmp2 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 2
@@ -135,8 +135,8 @@ define <4 x i32> @vld3Qi32(i32* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i32> @vld3Qi32_update(i32** %ptr) nounwind {
;CHECK-LABEL: vld3Qi32_update:
-;CHECK: vld3.32 {d16, d18, d20}, [r[[R:[0-9]+]]]!
-;CHECK: vld3.32 {d17, d19, d21}, [r[[R]]]!
+;CHECK: vld3.32 {d16, d18, d20}, {{\[}}[[R:r[0-9]+|lr]]]!
+;CHECK: vld3.32 {d17, d19, d21}, {{\[}}[[R]]]!
%A = load i32*, i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32.p0i8(i8* %tmp0, i32 1)
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index 10570039a9d2a..5663e6d41f021 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -14,7 +14,7 @@
define <8 x i8> @vld4i8(i8* %A) nounwind {
;CHECK-LABEL: vld4i8:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.8 {d16, d17, d18, d19}, [r0:64]
+;CHECK: vld4.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
@@ -25,7 +25,7 @@ define <8 x i8> @vld4i8(i8* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld4i8_update:
-;CHECK: vld4.8 {d16, d17, d18, d19}, [r2:128], r1
+;CHECK: vld4.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128], r1
%A = load i8*, i8** %ptr
%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 16)
%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
@@ -39,7 +39,7 @@ define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
define <4 x i16> @vld4i16(i16* %A) nounwind {
;CHECK-LABEL: vld4i16:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.16 {d16, d17, d18, d19}, [r0:128]
+;CHECK: vld4.16 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16.p0i8(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 0
@@ -51,7 +51,7 @@ define <4 x i16> @vld4i16(i16* %A) nounwind {
define <2 x i32> @vld4i32(i32* %A) nounwind {
;CHECK-LABEL: vld4i32:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.32 {d16, d17, d18, d19}, [r0:256]
+;CHECK: vld4.32 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
@@ -74,7 +74,7 @@ define <2 x float> @vld4f(float* %A) nounwind {
define <1 x i64> @vld4i64(i64* %A) nounwind {
;CHECK-LABEL: vld4i64:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld1.64 {d16, d17, d18, d19}, [r0:256]
+;CHECK: vld1.64 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64.p0i8(i8* %tmp0, i32 64)
%tmp2 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 0
@@ -85,7 +85,7 @@ define <1 x i64> @vld4i64(i64* %A) nounwind {
define <1 x i64> @vld4i64_update(i64** %ptr, i64* %A) nounwind {
;CHECK-LABEL: vld4i64_update:
-;CHECK: vld1.64 {d16, d17, d18, d19}, [r1:256]!
+;CHECK: vld1.64 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]!
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64.p0i8(i8* %tmp0, i32 64)
%tmp5 = getelementptr i64, i64* %A, i32 4
@@ -99,8 +99,8 @@ define <1 x i64> @vld4i64_update(i64** %ptr, i64* %A) nounwind {
define <16 x i8> @vld4Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld4Qi8:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.8 {d16, d18, d20, d22}, [r0:256]!
-;CHECK: vld4.8 {d17, d19, d21, d23}, [r0:256]
+;CHECK: vld4.8 {d16, d18, d20, d22}, [{{r[0-9]+|lr}}:256]!
+;CHECK: vld4.8 {d17, d19, d21, d23}, [{{r[0-9]+|lr}}:256]
%tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8.p0i8(i8* %A, i32 64)
%tmp2 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 2
@@ -111,8 +111,8 @@ define <16 x i8> @vld4Qi8(i8* %A) nounwind {
define <8 x i16> @vld4Qi16(i16* %A) nounwind {
;CHECK-LABEL: vld4Qi16:
;Check for no alignment specifier.
-;CHECK: vld4.16 {d16, d18, d20, d22}, [r0]!
-;CHECK: vld4.16 {d17, d19, d21, d23}, [r0]
+;CHECK: vld4.16 {d16, d18, d20, d22}, [{{r[0-9]+|lr}}]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [{{r[0-9]+|lr}}]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16.p0i8(i8* %tmp0, i32 1)
%tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
@@ -124,8 +124,8 @@ define <8 x i16> @vld4Qi16(i16* %A) nounwind {
;Check for a post-increment updating load.
define <8 x i16> @vld4Qi16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld4Qi16_update:
-;CHECK: vld4.16 {d16, d18, d20, d22}, [r1:64]!
-;CHECK: vld4.16 {d17, d19, d21, d23}, [r1:64]!
+;CHECK: vld4.16 {d16, d18, d20, d22}, [{{r[0-9]+|lr}}:64]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [{{r[0-9]+|lr}}:64]!
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16.p0i8(i8* %tmp0, i32 8)
diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll
index 71ca0f7915242..72f9434fd10a6 100644
--- a/test/CodeGen/ARM/vlddup.ll
+++ b/test/CodeGen/ARM/vlddup.ll
@@ -3,7 +3,7 @@
define <8 x i8> @vld1dupi8(i8* %A) nounwind {
;CHECK-LABEL: vld1dupi8:
;Check the (default) alignment value.
-;CHECK: vld1.8 {d16[]}, [r0]
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]
%tmp1 = load i8, i8* %A, align 8
%tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0
%tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer
@@ -13,7 +13,7 @@ define <8 x i8> @vld1dupi8(i8* %A) nounwind {
define <8 x i8> @vld1dupi8_preinc(i8** noalias nocapture %a, i32 %b) nounwind {
entry:
;CHECK-LABEL: vld1dupi8_preinc:
-;CHECK: vld1.8 {d16[]}, [r1]
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]
%0 = load i8*, i8** %a, align 4
%add.ptr = getelementptr inbounds i8, i8* %0, i32 %b
%1 = load i8, i8* %add.ptr, align 1
@@ -26,7 +26,7 @@ entry:
define <8 x i8> @vld1dupi8_postinc_fixed(i8** noalias nocapture %a) nounwind {
entry:
;CHECK-LABEL: vld1dupi8_postinc_fixed:
-;CHECK: vld1.8 {d16[]}, [r1]!
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]!
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <8 x i8> undef, i8 %1, i32 0
@@ -39,7 +39,7 @@ entry:
define <8 x i8> @vld1dupi8_postinc_register(i8** noalias nocapture %a, i32 %n) nounwind {
entry:
;CHECK-LABEL: vld1dupi8_postinc_register:
-;CHECK: vld1.8 {d16[]}, [r2], r1
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}], r1
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <8 x i8> undef, i8 %1, i32 0
@@ -52,7 +52,7 @@ entry:
define <16 x i8> @vld1dupqi8_preinc(i8** noalias nocapture %a, i32 %b) nounwind {
entry:
;CHECK-LABEL: vld1dupqi8_preinc:
-;CHECK: vld1.8 {d16[], d17[]}, [r1]
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%0 = load i8*, i8** %a, align 4
%add.ptr = getelementptr inbounds i8, i8* %0, i32 %b
%1 = load i8, i8* %add.ptr, align 1
@@ -65,7 +65,7 @@ entry:
define <16 x i8> @vld1dupqi8_postinc_fixed(i8** noalias nocapture %a) nounwind {
entry:
;CHECK-LABEL: vld1dupqi8_postinc_fixed:
-;CHECK: vld1.8 {d16[], d17[]}, [r1]!
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]!
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <16 x i8> undef, i8 %1, i32 0
@@ -78,7 +78,7 @@ entry:
define <16 x i8> @vld1dupqi8_postinc_register(i8** noalias nocapture %a, i32 %n) nounwind {
entry:
;CHECK-LABEL: vld1dupqi8_postinc_register:
-;CHECK: vld1.8 {d16[], d17[]}, [r2], r1
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}], r1
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <16 x i8> undef, i8 %1, i32 0
@@ -91,7 +91,7 @@ entry:
define <4 x i16> @vld1dupi16(i16* %A) nounwind {
;CHECK-LABEL: vld1dupi16:
;Check the alignment value. Max for this instruction is 16 bits:
-;CHECK: vld1.16 {d16[]}, [r0:16]
+;CHECK: vld1.16 {d16[]}, [{{r[0-9]+|lr}}:16]
%tmp1 = load i16, i16* %A, align 8
%tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
%tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -100,7 +100,7 @@ define <4 x i16> @vld1dupi16(i16* %A) nounwind {
define <4 x i16> @vld1dupi16_misaligned(i16* %A) nounwind {
;CHECK-LABEL: vld1dupi16_misaligned:
-;CHECK: vld1.16 {d16[]}, [r0]
+;CHECK: vld1.16 {d16[]}, [{{r[0-9]+|lr}}]
%tmp1 = load i16, i16* %A, align 1
%tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
%tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -110,7 +110,7 @@ define <4 x i16> @vld1dupi16_misaligned(i16* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i16> @load_i16_dup_zext(i8* %A) nounwind {
;CHECK-LABEL: load_i16_dup_zext:
-;CHECK: ldrb r0, [r0]
+;CHECK: ldrb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.16 d16, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = zext i8 %tmp1 to i16
@@ -122,7 +122,7 @@ define <4 x i16> @load_i16_dup_zext(i8* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i16> @load_i16_dup_sext(i8* %A) nounwind {
;CHECK-LABEL: load_i16_dup_sext:
-;CHECK: ldrsb r0, [r0]
+;CHECK: ldrsb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.16 d16, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = sext i8 %tmp1 to i16
@@ -134,7 +134,7 @@ define <4 x i16> @load_i16_dup_sext(i8* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <8 x i16> @load_i16_dupq_zext(i8* %A) nounwind {
;CHECK-LABEL: load_i16_dupq_zext:
-;CHECK: ldrb r0, [r0]
+;CHECK: ldrb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.16 q8, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = zext i8 %tmp1 to i16
@@ -146,7 +146,7 @@ define <8 x i16> @load_i16_dupq_zext(i8* %A) nounwind {
define <2 x i32> @vld1dupi32(i32* %A) nounwind {
;CHECK-LABEL: vld1dupi32:
;Check the alignment value. Max for this instruction is 32 bits:
-;CHECK: vld1.32 {d16[]}, [r0:32]
+;CHECK: vld1.32 {d16[]}, [{{r[0-9]+|lr}}:32]
%tmp1 = load i32, i32* %A, align 8
%tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
%tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
@@ -156,7 +156,7 @@ define <2 x i32> @vld1dupi32(i32* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i32> @load_i32_dup_zext(i8* %A) nounwind {
;CHECK-LABEL: load_i32_dup_zext:
-;CHECK: ldrb r0, [r0]
+;CHECK: ldrb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.32 q8, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = zext i8 %tmp1 to i32
@@ -168,7 +168,7 @@ define <4 x i32> @load_i32_dup_zext(i8* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i32> @load_i32_dup_sext(i8* %A) nounwind {
;CHECK-LABEL: load_i32_dup_sext:
-;CHECK: ldrsb r0, [r0]
+;CHECK: ldrsb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.32 q8, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = sext i8 %tmp1 to i32
@@ -179,7 +179,7 @@ define <4 x i32> @load_i32_dup_sext(i8* %A) nounwind {
define <2 x float> @vld1dupf(float* %A) nounwind {
;CHECK-LABEL: vld1dupf:
-;CHECK: vld1.32 {d16[]}, [r0:32]
+;CHECK: vld1.32 {d16[]}, [{{r[0-9]+|lr}}:32]
%tmp0 = load float, float* %A
%tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
@@ -189,7 +189,7 @@ define <2 x float> @vld1dupf(float* %A) nounwind {
define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
;CHECK-LABEL: vld1dupQi8:
;Check the (default) alignment value.
-;CHECK: vld1.8 {d16[], d17[]}, [r0]
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%tmp1 = load i8, i8* %A, align 8
%tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0
%tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -198,7 +198,7 @@ define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
define <4 x float> @vld1dupQf(float* %A) nounwind {
;CHECK-LABEL: vld1dupQf:
-;CHECK: vld1.32 {d16[], d17[]}, [r0:32]
+;CHECK: vld1.32 {d16[], d17[]}, [{{r[0-9]+|lr}}:32]
%tmp0 = load float, float* %A
%tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
@@ -212,7 +212,7 @@ define <4 x float> @vld1dupQf(float* %A) nounwind {
define <8 x i8> @vld2dupi8(i8* %A) nounwind {
;CHECK-LABEL: vld2dupi8:
;Check the (default) alignment value.
-;CHECK: vld2.8 {d16[], d17[]}, [r0]
+;CHECK: vld2.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8.p0i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
%tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
@@ -283,7 +283,7 @@ define <4 x i16> @vld2dupi16(i8* %A) nounwind {
;CHECK-LABEL: vld2dupi16:
;Check that a power-of-two alignment smaller than the total size of the memory
;being loaded is ignored.
-;CHECK: vld2.16 {d16[], d17[]}, [r0]
+;CHECK: vld2.16 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -296,7 +296,7 @@ define <4 x i16> @vld2dupi16(i8* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld2dupi16_update:
-;CHECK: vld2.16 {d16[], d17[]}, [r1]!
+;CHECK: vld2.16 {d16[], d17[]}, [{{r[0-9]+|lr}}]!
%A = load i16*, i16** %ptr
%A2 = bitcast i16* %A to i8*
%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
@@ -313,7 +313,7 @@ define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
define <4 x i16> @vld2dupi16_odd_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld2dupi16_odd_update:
;CHECK: mov [[INC:r[0-9]+]], #6
-;CHECK: vld2.16 {d16[], d17[]}, [r1], [[INC]]
+;CHECK: vld2.16 {d16[], d17[]}, [{{r[0-9]+|lr}}], [[INC]]
%A = load i16*, i16** %ptr
%A2 = bitcast i16* %A to i8*
%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
@@ -330,7 +330,7 @@ define <4 x i16> @vld2dupi16_odd_update(i16** %ptr) nounwind {
define <2 x i32> @vld2dupi32(i8* %A) nounwind {
;CHECK-LABEL: vld2dupi32:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld2.32 {d16[], d17[]}, [r0:64]
+;CHECK: vld2.32 {d16[], d17[]}, [{{r[0-9]+|lr}}:64]
%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32.p0i8(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
%tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
@@ -350,7 +350,7 @@ declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32.p0i8(i8*, <2 x
;Check for a post-increment updating load with register increment.
define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld3dupi8_update:
-;CHECK: vld3.8 {d16[], d17[], d18[]}, [r2], r1
+;CHECK: vld3.8 {d16[], d17[], d18[]}, [{{r[0-9]+|lr}}], r1
%A = load i8*, i8** %ptr
%tmp0 = tail call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8.p0i8(i8* %A, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 8)
%tmp1 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 0
@@ -369,7 +369,7 @@ define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
define <4 x i16> @vld3dupi16(i8* %A) nounwind {
;CHECK-LABEL: vld3dupi16:
;Check the (default) alignment value. VLD3 does not support alignment.
-;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
+;CHECK: vld3.16 {d16[], d17[], d18[]}, [{{r[0-9]+|lr}}]
%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16.p0i8(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
%tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -391,7 +391,7 @@ declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16.p0i8(i8*, <4 x
;Check for a post-increment updating load.
define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld4dupi16_update:
-;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
+;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [{{r[0-9]+|lr}}]!
%A = load i16*, i16** %ptr
%A2 = bitcast i16* %A to i8*
%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
@@ -415,7 +415,7 @@ define <2 x i32> @vld4dupi32(i8* %A) nounwind {
;CHECK-LABEL: vld4dupi32:
;Check the alignment value. An 8-byte alignment is allowed here even though
;it is smaller than the total size of the memory being loaded.
-;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0:64]
+;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [{{r[0-9]+|lr}}:64]
%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32.p0i8(i8* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
%tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
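The hunks above all apply one relaxation: in FileCheck, {{...}} delimits an inline regular expression, so [{{r[0-9]+|lr}}] matches any of [r0] through [r12] and also [lr]. An illustrative check line in this file's own style (not an added test):

;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]

The checks thus stop depending on exactly which general-purpose register holds the base address, which matters once lr is also a candidate for it.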
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index 866641f3fbbd9..f5c0f09ed4409 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -308,7 +308,7 @@ define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;Check for a post-increment updating load with register increment.
define <8 x i16> @vld3laneQi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vld3laneQi16_update:
-;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}], {{r[0-9]+}}
+;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+|lr}}], {{r[0-9]+}}
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>, <8 x i16>* %B
diff --git a/test/CodeGen/ARM/vpadd.ll b/test/CodeGen/ARM/vpadd.ll
index 3409d37a31f4c..3fa93bb43f03d 100644
--- a/test/CodeGen/ARM/vpadd.ll
+++ b/test/CodeGen/ARM/vpadd.ll
@@ -285,17 +285,17 @@ define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ss
define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDL_s8:
; CHECK: @ BB#0:
-; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vmov.i16 d18, #0x8
-; CHECK-NEXT: vneg.s16 d18, d18
-; CHECK-NEXT: vext.8 d19, d16, d16, #1
-; CHECK-NEXT: vshl.i16 d16, d16, #8
-; CHECK-NEXT: vshl.i16 d17, d19, #8
-; CHECK-NEXT: vshl.s16 d16, d16, d18
-; CHECK-NEXT: vshl.s16 d17, d17, d18
-; CHECK-NEXT: vadd.i16 d16, d17, d16
-; CHECK-NEXT: vstr d16, [r1]
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vmov.i16 d16, #0x8
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vext.8 d17, d18, d16, #1
+; CHECK-NEXT: vneg.s16 d16, d16
+; CHECK-NEXT: vshl.i16 d18, d18, #8
+; CHECK-NEXT: vshl.i16 d17, d17, #8
+; CHECK-NEXT: vshl.s16 d18, d18, d16
+; CHECK-NEXT: vshl.s16 d16, d17, d16
+; CHECK-NEXT: vadd.i16 d16, d16, d18
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
%tmp = load <16 x i8>, <16 x i8>* %cbcr
%tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
diff --git a/test/CodeGen/ARM/vst1.ll b/test/CodeGen/ARM/vst1.ll
index 404129a7e6adf..e351a2ec23739 100644
--- a/test/CodeGen/ARM/vst1.ll
+++ b/test/CodeGen/ARM/vst1.ll
@@ -39,7 +39,7 @@ define void @vst1f(float* %A, <2 x float>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst1f_update(float** %ptr, <2 x float>* %B) nounwind {
;CHECK-LABEL: vst1f_update:
-;CHECK: vst1.32 {d16}, [r1]!
+;CHECK: vst1.32 {d16}, [r{{[0-9]+}}]!
%A = load float*, float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <2 x float>, <2 x float>* %B
diff --git a/test/CodeGen/ARM/vst4.ll b/test/CodeGen/ARM/vst4.ll
index 1889551022905..afa4321c91a0a 100644
--- a/test/CodeGen/ARM/vst4.ll
+++ b/test/CodeGen/ARM/vst4.ll
@@ -12,7 +12,7 @@ define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
;Check for a post-increment updating store with register increment.
define void @vst4i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vst4i8_update:
-;CHECK: vst4.8 {d16, d17, d18, d19}, [r1:128], r2
+;CHECK: vst4.8 {d16, d17, d18, d19}, [r{{[0-9]+}}:128], r2
%A = load i8*, i8** %ptr
%tmp1 = load <8 x i8>, <8 x i8>* %B
call void @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 16)
@@ -62,7 +62,7 @@ define void @vst4i64(i64* %A, <1 x i64>* %B) nounwind {
define void @vst4i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vst4i64_update:
-;CHECK: vst1.64 {d16, d17, d18, d19}, [r1]!
+;CHECK: vst1.64 {d16, d17, d18, d19}, [r{{[0-9]+}}]!
%A = load i64*, i64** %ptr
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>, <1 x i64>* %B
@@ -116,8 +116,8 @@ define void @vst4Qf(float* %A, <4 x float>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst4Qf_update(float** %ptr, <4 x float>* %B) nounwind {
;CHECK-LABEL: vst4Qf_update:
-;CHECK: vst4.32 {d16, d18, d20, d22}, [r1]!
-;CHECK: vst4.32 {d17, d19, d21, d23}, [r1]!
+;CHECK: vst4.32 {d16, d18, d20, d22}, [r[[REG:[0-9]+]]]!
+;CHECK: vst4.32 {d17, d19, d21, d23}, [r[[REG]]]!
%A = load float*, float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <4 x float>, <4 x float>* %B
diff --git a/test/CodeGen/ARM/vstlane.ll b/test/CodeGen/ARM/vstlane.ll
index 7e130ea01b643..49af0be92316b 100644
--- a/test/CodeGen/ARM/vstlane.ll
+++ b/test/CodeGen/ARM/vstlane.ll
@@ -127,7 +127,7 @@ define void @vst2lanei16(i16* %A, <4 x i16>* %B) nounwind {
;Check for a post-increment updating store with register increment.
define void @vst2lanei16_update(i16** %ptr, <4 x i16>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vst2lanei16_update:
-;CHECK: vst2.16 {d16[1], d17[1]}, [r1], r2
+;CHECK: vst2.16 {d16[1], d17[1]}, [r{{[0-9]+}}], r{{[0-9]+}}
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>, <4 x i16>* %B
@@ -251,7 +251,7 @@ define void @vst3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst3laneQi32_update(i32** %ptr, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vst3laneQi32_update:
-;CHECK: vst3.32 {d16[0], d18[0], d20[0]}, [r1]!
+;CHECK: vst3.32 {d16[0], d18[0], d20[0]}, [r{{[0-9]+}}]!
%A = load i32*, i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>, <4 x i32>* %B
@@ -292,7 +292,7 @@ define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst4lanei8_update:
-;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1:32]!
+;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r{{[0-9]+}}:32]!
%A = load i8*, i8** %ptr
%tmp1 = load <8 x i8>, <8 x i8>* %B
call void @llvm.arm.neon.vst4lane.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index 0a5235df319fe..24090cfd6c651 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -324,26 +324,23 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8.
; CHECK-LABEL: cmpsel_trunc:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r4, r5, r11, lr}
-; CHECK-NEXT: push {r4, r5, r11, lr}
-; CHECK-NEXT: add r4, sp, #64
-; CHECK-NEXT: add r5, sp, #32
-; CHECK-NEXT: add r12, sp, #48
-; CHECK-NEXT: add lr, sp, #16
-; CHECK-NEXT: vld1.64 {d16, d17}, [r5]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r4]
-; CHECK-NEXT: vld1.64 {d20, d21}, [lr]
-; CHECK-NEXT: vld1.64 {d22, d23}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vcgt.u32 q9, q11, q10
-; CHECK-NEXT: vmovn.i32 d17, q8
-; CHECK-NEXT: vmovn.i32 d16, q9
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vmovn.i16 d16, q8
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r4, r5, r11, lr}
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: add r12, sp, #48
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: add r12, sp, #32
+; CHECK-NEXT: vcgt.u32 q8, q10, q8
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: vcgt.u32 q9, q10, q9
+; CHECK-NEXT: vmov d20, r2, r3
+; CHECK-NEXT: vmovn.i32 d17, q8
+; CHECK-NEXT: vmovn.i32 d16, q9
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmovn.i16 d16, q8
+; CHECK-NEXT: vbsl d16, d18, d20
+; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%c = icmp ult <8 x i32> %cmp0, %cmp1
%res = select <8 x i1> %c, <8 x i8> %in0, <8 x i8> %in1
@@ -356,28 +353,28 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r4, lr}
-; CHECK-NEXT: push {r4, lr}
-; CHECK-NEXT: ldr r12, [sp, #40]
-; CHECK-NEXT: add lr, sp, #24
-; CHECK-NEXT: add r4, sp, #8
-; CHECK-NEXT: vld1.64 {d16, d17}, [r4]
-; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
-; CHECK-NEXT: vld1.32 {d20[0]}, [r12:32]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: vmovl.u8 q9, d20
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r4, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, sp, #24
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: ldr r12, [sp, #40]
+; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
+; CHECK-NEXT: vmov.i8 d19, #0x7
+; CHECK-NEXT: vmovl.u8 q10, d18
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vneg.s8 d17, d19
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vuzp.8 d16, d20
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: pop {r11, lr}
+; CHECK-NEXT: mov pc, lr
<4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
%cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
@@ -392,25 +389,22 @@ define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: add lr, sp, #8
-; CHECK-NEXT: vld1.64 {d16, d17}, [lr]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vmov.i8 d18, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vuzp.8 d16, d17
+; CHECK-NEXT: vneg.s8 d17, d18
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
<4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
%cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
@@ -423,26 +417,23 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1
define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: add lr, sp, #8
-; CHECK-NEXT: vldr d20, .LCPI22_0
-; CHECK-NEXT: vld1.64 {d16, d17}, [lr]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vtbl.8 d16, {d16}, d20
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vldr d18, .LCPI22_0
+; CHECK-NEXT: vmov.i8 d19, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vtbl.8 d16, {d16}, d18
+; CHECK-NEXT: vneg.s8 d17, d19
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ BB#1:
; CHECK-NEXT: .LCPI22_0:
@@ -468,65 +459,63 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
; CHECK-LABEL: vuzp_wide_type:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT: .setfp r11, sp, #16
-; CHECK-NEXT: add r11, sp, #16
-; CHECK-NEXT: .pad #8
-; CHECK-NEXT: sub sp, sp, #8
-; CHECK-NEXT: bic sp, sp, #15
-; CHECK-NEXT: add r5, r11, #52
-; CHECK-NEXT: add r7, r11, #32
-; CHECK-NEXT: add r4, r11, #44
-; CHECK-NEXT: add r6, r11, #24
-; CHECK-NEXT: add r12, r11, #60
-; CHECK-NEXT: add lr, r11, #40
-; CHECK-NEXT: vld1.32 {d17[0]}, [r7:32]
-; CHECK-NEXT: vld1.32 {d19[0]}, [r5:32]
-; CHECK-NEXT: vld1.32 {d22[0]}, [r12:32]
-; CHECK-NEXT: ldr r12, [r11, #64]
-; CHECK-NEXT: vld1.32 {d20[0]}, [lr:32]
-; CHECK-NEXT: add r7, r11, #48
-; CHECK-NEXT: add r5, r11, #28
-; CHECK-NEXT: vld1.32 {d16[0]}, [r6:32]
-; CHECK-NEXT: vld1.32 {d18[0]}, [r4:32]
-; CHECK-NEXT: add r6, r11, #56
-; CHECK-NEXT: add r4, r11, #36
-; CHECK-NEXT: vcgt.u32 q10, q11, q10
-; CHECK-NEXT: vld1.32 {d19[1]}, [r6:32]
-; CHECK-NEXT: vld1.32 {d17[1]}, [r4:32]
-; CHECK-NEXT: add r6, r12, #4
-; CHECK-NEXT: vld1.32 {d18[1]}, [r7:32]
-; CHECK-NEXT: vld1.32 {d16[1]}, [r5:32]
-; CHECK-NEXT: ldr r7, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmovn.i32 d18, q10
-; CHECK-NEXT: vmov.32 d21[0], r7
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.u8 r7, d21[3]
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vmov.8 d23[0], r7
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: add r7, r11, #8
-; CHECK-NEXT: vldr d18, .LCPI23_0
-; CHECK-NEXT: vld1.8 {d23[1]}, [r6]
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vshl.s8 d20, d16, d17
-; CHECK-NEXT: vmov.i8 q8, #0x7
-; CHECK-NEXT: vneg.s8 q8, q8
-; CHECK-NEXT: vtbl.8 d22, {d20, d21}, d18
-; CHECK-NEXT: vld1.64 {d18, d19}, [r7]
-; CHECK-NEXT: vshl.i8 q10, q11, #7
-; CHECK-NEXT: vmov d23, r2, r3
-; CHECK-NEXT: vmov d22, r0, r1
-; CHECK-NEXT: vshl.s8 q8, q10, q8
-; CHECK-NEXT: vbsl q8, q11, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: sub sp, r11, #16
-; CHECK-NEXT: pop {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .save {r4, r10, r11, lr}
+; CHECK-NEXT: push {r4, r10, r11, lr}
+; CHECK-NEXT: .setfp r11, sp, #8
+; CHECK-NEXT: add r11, sp, #8
+; CHECK-NEXT: bic sp, sp, #15
+; CHECK-NEXT: add r12, r11, #32
+; CHECK-NEXT: add lr, r11, #60
+; CHECK-NEXT: vld1.32 {d17[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #24
+; CHECK-NEXT: vld1.32 {d22[0]}, [lr:32]
+; CHECK-NEXT: add lr, r11, #36
+; CHECK-NEXT: vld1.32 {d16[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #52
+; CHECK-NEXT: vld1.32 {d19[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #44
+; CHECK-NEXT: vld1.32 {d17[1]}, [lr:32]
+; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #40
+; CHECK-NEXT: vld1.32 {d20[0]}, [r12:32]
+; CHECK-NEXT: ldr r12, [r11, #64]
+; CHECK-NEXT: vcgt.u32 q10, q11, q10
+; CHECK-NEXT: ldr r4, [r12]
+; CHECK-NEXT: vmov.32 d25[0], r4
+; CHECK-NEXT: add r4, r11, #28
+; CHECK-NEXT: vld1.32 {d16[1]}, [r4:32]
+; CHECK-NEXT: add r4, r11, #56
+; CHECK-NEXT: vld1.32 {d19[1]}, [r4:32]
+; CHECK-NEXT: add r4, r11, #48
+; CHECK-NEXT: vmov.u8 lr, d25[3]
+; CHECK-NEXT: vld1.32 {d18[1]}, [r4:32]
+; CHECK-NEXT: add r4, r12, #4
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vmovn.i32 d19, q10
+; CHECK-NEXT: vldr d20, .LCPI23_0
+; CHECK-NEXT: vmov.i8 d18, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vneg.s8 d17, d18
+; CHECK-NEXT: vuzp.8 d16, d19
+; CHECK-NEXT: vmov.i8 q9, #0x7
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vneg.s8 q9, q9
+; CHECK-NEXT: vshl.s8 d24, d16, d17
+; CHECK-NEXT: vmov.8 d17[0], lr
+; CHECK-NEXT: vtbl.8 d16, {d24, d25}, d20
+; CHECK-NEXT: vld1.8 {d17[1]}, [r4]
+; CHECK-NEXT: add r4, r11, #8
+; CHECK-NEXT: vshl.i8 q8, q8, #7
+; CHECK-NEXT: vld1.64 {d20, d21}, [r4]
+; CHECK-NEXT: vshl.s8 q8, q8, q9
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vbsl q8, q9, q10
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: sub sp, r11, #8
+; CHECK-NEXT: pop {r4, r10, r11, lr}
+; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ BB#1:
; CHECK-NEXT: .LCPI23_0:
diff --git a/test/CodeGen/BPF/remove_truncate_1.ll b/test/CodeGen/BPF/remove_truncate_1.ll
new file mode 100644
index 0000000000000..65433853b9d50
--- /dev/null
+++ b/test/CodeGen/BPF/remove_truncate_1.ll
@@ -0,0 +1,87 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs | FileCheck %s
+
+; Source code:
+; struct xdp_md {
+; unsigned data;
+; unsigned data_end;
+; };
+;
+; int gbl;
+; int xdp_dummy(struct xdp_md *xdp)
+; {
+; char tmp;
+; long addr;
+;
+; if (gbl) {
+; long addr1 = (long)xdp->data;
+; tmp = *(char *)addr1;
+; if (tmp == 1)
+; return 3;
+; } else {
+; tmp = *(volatile char *)(long)xdp->data_end;
+; if (tmp == 1)
+; return 2;
+; }
+; addr = (long)xdp->data;
+; tmp = *(volatile char *)addr;
+; if (tmp == 0)
+; return 1;
+; return 0;
+; }
+
+%struct.xdp_md = type { i32, i32 }
+
+@gbl = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind
+define i32 @xdp_dummy(%struct.xdp_md* nocapture readonly %xdp) local_unnamed_addr #0 {
+entry:
+ %0 = load i32, i32* @gbl, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ %data = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 0
+ %1 = load i32, i32* %data, align 4
+ %conv = zext i32 %1 to i64
+ %2 = inttoptr i64 %conv to i8*
+ %3 = load i8, i8* %2, align 1
+ %cmp = icmp eq i8 %3, 1
+ br i1 %cmp, label %cleanup20, label %if.end12
+; CHECK: r1 = *(u32 *)(r1 + 0)
+; CHECK: r2 = *(u8 *)(r1 + 0)
+
+if.else: ; preds = %entry
+ %data_end = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 1
+ %4 = load i32, i32* %data_end, align 4
+ %conv6 = zext i32 %4 to i64
+; CHECK: r2 = *(u32 *)(r1 + 4)
+ %5 = inttoptr i64 %conv6 to i8*
+ %6 = load volatile i8, i8* %5, align 1
+ %cmp8 = icmp eq i8 %6, 1
+ br i1 %cmp8, label %cleanup20, label %if.else.if.end12_crit_edge
+
+if.else.if.end12_crit_edge: ; preds = %if.else
+ %data13.phi.trans.insert = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 0
+ %.pre = load i32, i32* %data13.phi.trans.insert, align 4
+ br label %if.end12
+; CHECK: r1 = *(u32 *)(r1 + 0)
+
+if.end12: ; preds = %if.else.if.end12_crit_edge, %if.then
+ %7 = phi i32 [ %.pre, %if.else.if.end12_crit_edge ], [ %1, %if.then ]
+ %conv14 = zext i32 %7 to i64
+; CHECK-NOT: r1 <<= 32
+; CHECK-NOT: r1 >>= 32
+ %8 = inttoptr i64 %conv14 to i8*
+ %9 = load volatile i8, i8* %8, align 1
+; CHECK: r1 = *(u8 *)(r1 + 0)
+ %cmp16 = icmp eq i8 %9, 0
+ %.28 = zext i1 %cmp16 to i32
+ br label %cleanup20
+
+cleanup20: ; preds = %if.then, %if.end12, %if.else
+ %retval.1 = phi i32 [ 3, %if.then ], [ 2, %if.else ], [ %.28, %if.end12 ]
+ ret i32 %retval.1
+}
+
+attributes #0 = { norecurse nounwind }
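A hedged sketch of the behavior this new test locks in (hypothetical IR; the function and value names are illustrative, not taken from the test): BPF 32-bit loads zero-fill the upper half of the 64-bit destination register, so a zext of the loaded value should cost no extra instructions, and the CHECK-NOT lines above verify that the shift pair of a naive lowering is gone.

; Sketch only, assuming the same llc -march=bpf lowering exercised above;
; @zext_of_load and %p are illustrative names.
define i64 @zext_of_load(i32* %p) {
  %v = load i32, i32* %p, align 4   ; lowers to a u32 load, which zero-extends
  %e = zext i32 %v to i64           ; expected to fold away entirely
  ret i64 %e                        ; i.e. no "rX <<= 32; rX >>= 32" pair
}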
diff --git a/test/CodeGen/BPF/remove_truncate_2.ll b/test/CodeGen/BPF/remove_truncate_2.ll
new file mode 100644
index 0000000000000..979d820dd857b
--- /dev/null
+++ b/test/CodeGen/BPF/remove_truncate_2.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs | FileCheck %s
+
+; Source code:
+; struct xdp_md {
+; unsigned data;
+; unsigned data_end;
+; };
+;
+; int gbl;
+; int xdp_dummy(struct xdp_md *xdp)
+; {
+; char addr = *(char *)(long)xdp->data;
+; if (gbl) {
+; if (gbl == 1)
+; return 1;
+; if (addr == 1)
+; return 3;
+; } else if (addr == 0)
+; return 2;
+; return 0;
+; }
+
+%struct.xdp_md = type { i32, i32 }
+
+@gbl = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @xdp_dummy(%struct.xdp_md* nocapture readonly %xdp) local_unnamed_addr #0 {
+entry:
+ %data = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 0
+ %0 = load i32, i32* %data, align 4
+ %conv = zext i32 %0 to i64
+ %1 = inttoptr i64 %conv to i8*
+ %2 = load i8, i8* %1, align 1
+; CHECK: r1 = *(u32 *)(r1 + 0)
+; CHECK: r1 = *(u8 *)(r1 + 0)
+ %3 = load i32, i32* @gbl, align 4
+ switch i32 %3, label %if.end [
+ i32 0, label %if.else
+ i32 1, label %cleanup
+ ]
+
+if.end: ; preds = %entry
+ %cmp4 = icmp eq i8 %2, 1
+; CHECK: r0 = 3
+; CHECK-NOT: r1 &= 255
+; CHECK: if r1 == 1 goto
+ br i1 %cmp4, label %cleanup, label %if.end13
+
+if.else: ; preds = %entry
+ %cmp9 = icmp eq i8 %2, 0
+; CHECK: r0 = 2
+; CHECK-NOT: r1 &= 255
+; CHECK: if r1 == 0 goto
+ br i1 %cmp9, label %cleanup, label %if.end13
+
+if.end13: ; preds = %if.else, %if.end
+ br label %cleanup
+
+cleanup: ; preds = %if.else, %if.end, %entry, %if.end13
+ %retval.0 = phi i32 [ 0, %if.end13 ], [ 1, %entry ], [ 3, %if.end ], [ 2, %if.else ]
+ ret i32 %retval.0
+}
+
+attributes #0 = { norecurse nounwind readonly }
diff --git a/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir b/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir
new file mode 100644
index 0000000000000..b77a7b1bd3655
--- /dev/null
+++ b/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir
@@ -0,0 +1,30 @@
+# RUN: llc -march=hexagon -run-pass amode-opt %s -o - | FileCheck %s
+
+# Check that the addasl is not propagated into the addressing mode: %r1 differs along the two paths into bb.2 (1 via bb.0, 2 via bb.1), so a folded load would read the wrong address.
+# CHECK-NOT: L4_loadri_ur
+
+--- |
+ @g = global i32 zeroinitializer
+ define void @fred() { ret void }
+...
+
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: %p0
+ %r0 = A2_tfrsi @g
+ %r1 = A2_tfrsi 1
+ %r2 = S2_addasl_rrri %r0, %r1, 1
+ J2_jumpt %p0, %bb.2, implicit-def %pc
+
+ bb.1:
+ liveins: %r0, %r2
+ %r1 = A2_tfrsi 2
+
+ bb.2:
+ liveins: %r0, %r2
+ %r3 = L2_loadri_io %r2, 0
+...
diff --git a/test/CodeGen/Hexagon/expand-condsets-undefvni.ll b/test/CodeGen/Hexagon/expand-condsets-undefvni.ll
new file mode 100644
index 0000000000000..45ba5131e6683
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-condsets-undefvni.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that this compiles successfully.
+; CHECK: jumpr r31
+
+target triple = "hexagon"
+
+define i64 @fred(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b2:
+ %v3 = lshr i64 %a1, 52
+ %v4 = trunc i64 %v3 to i11
+ switch i11 %v4, label %b15 [
+ i11 -1, label %b5
+ i11 0, label %b14
+ ]
+
+b5: ; preds = %b2
+ br i1 undef, label %b13, label %b6
+
+b6: ; preds = %b5
+ %v7 = or i64 %a1, 2251799813685248
+ br i1 undef, label %b8, label %b10
+
+b8: ; preds = %b6
+ %v9 = select i1 undef, i64 %v7, i64 undef
+ br label %b16
+
+b10: ; preds = %b6
+ br i1 undef, label %b16, label %b11
+
+b11: ; preds = %b10
+ %v12 = select i1 undef, i64 undef, i64 %v7
+ br label %b16
+
+b13: ; preds = %b5
+ br label %b16
+
+b14: ; preds = %b2
+ br label %b16
+
+b15: ; preds = %b2
+ br label %b16
+
+b16: ; preds = %b15, %b14, %b13, %b11, %b10, %b8
+ %v17 = phi i64 [ undef, %b13 ], [ -2251799813685248, %b14 ], [ 0, %b15 ], [ %v12, %b11 ], [ %v9, %b8 ], [ %v7, %b10 ]
+ ret i64 %v17
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" }
diff --git a/test/CodeGen/Hexagon/expand-vselect-kill.ll b/test/CodeGen/Hexagon/expand-vselect-kill.ll
new file mode 100644
index 0000000000000..1d07859665c07
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-vselect-kill.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+;
+; Check that this does not crash.
+
+target triple = "hexagon"
+
+; CHECK-LABEL: danny:
+; CHECK-DAG: if ([[PREG:p[0-3]]]) [[VREG:v[0-9]+]]
+; CHECK-DAG: if (![[PREG]]) [[VREG]]
+define void @danny() local_unnamed_addr #0 {
+b0:
+ %v1 = icmp eq i32 0, undef
+ %v2 = select i1 %v1, <16 x i32> zeroinitializer, <16 x i32> undef
+ %v3 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v2, <16 x i32> zeroinitializer, i32 2)
+ %v4 = tail call <32 x i32> @llvm.hexagon.V6.vswap(<512 x i1> undef, <16 x i32> undef, <16 x i32> %v3)
+ %v5 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v4)
+ %v6 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v5, i32 62)
+ %v7 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v6)
+ store <16 x i32> %v7, <16 x i32>* undef, align 64
+ unreachable
+}
+
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #2
+declare <32 x i32> @llvm.hexagon.V6.vswap(<512 x i1>, <16 x i32>, <16 x i32>) #2
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #2
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #2
+declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #2
+
+; CHECK-LABEL: sammy:
+; CHECK-DAG: if ([[PREG:p[0-3]]]) [[VREG:v[0-9]+]]
+; CHECK-DAG: if (![[PREG]]) [[VREG]]
+define void @sammy() local_unnamed_addr #1 {
+b0:
+ %v1 = icmp eq i32 0, undef
+ %v2 = select i1 %v1, <32 x i32> zeroinitializer, <32 x i32> undef
+ %v3 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %v2, <32 x i32> zeroinitializer, i32 2)
+ %v4 = tail call <64 x i32> @llvm.hexagon.V6.vswap.128B(<1024 x i1> undef, <32 x i32> undef, <32 x i32> %v3)
+ %v5 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v4)
+ %v6 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> %v5, i32 62)
+ %v7 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v6)
+ store <32 x i32> %v7, <32 x i32>* undef, align 128
+ unreachable
+}
+
+declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #2
+declare <64 x i32> @llvm.hexagon.V6.vswap.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #2
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #2
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #2
+declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx" }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-double" }
+attributes #2 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/fpelim-basic.ll b/test/CodeGen/Hexagon/fpelim-basic.ll
new file mode 100644
index 0000000000000..ffec07f7dbfe4
--- /dev/null
+++ b/test/CodeGen/Hexagon/fpelim-basic.ll
@@ -0,0 +1,91 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; FP elimination enabled.
+;
+; CHECK-LABEL: danny:
+; CHECK: r29 = add(r29,#-[[SIZE:[0-9]+]])
+; CHECK: r29 = add(r29,#[[SIZE]])
+define i32 @danny(i32 %a0, i32 %a1) local_unnamed_addr #0 {
+b2:
+ %v3 = alloca [32 x i32], align 8
+ %v4 = bitcast [32 x i32]* %v3 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %v4) #3
+ br label %b5
+
+b5: ; preds = %b5, %b2
+ %v6 = phi i32 [ 0, %b2 ], [ %v8, %b5 ]
+ %v7 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v6
+ store i32 %v6, i32* %v7, align 4
+ %v8 = add nuw nsw i32 %v6, 1
+ %v9 = icmp eq i32 %v8, 32
+ br i1 %v9, label %b10, label %b5
+
+b10: ; preds = %b5
+ %v11 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %a0
+ store i32 %a1, i32* %v11, align 4
+ br label %b12
+
+b12: ; preds = %b12, %b10
+ %v13 = phi i32 [ 0, %b10 ], [ %v18, %b12 ]
+ %v14 = phi i32 [ 0, %b10 ], [ %v17, %b12 ]
+ %v15 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v13
+ %v16 = load i32, i32* %v15, align 4
+ %v17 = add nsw i32 %v16, %v14
+ %v18 = add nuw nsw i32 %v13, 1
+ %v19 = icmp eq i32 %v18, 32
+ br i1 %v19, label %b20, label %b12
+
+b20: ; preds = %b12
+ call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %v4) #3
+ ret i32 %v17
+}
+
+; FP elimination disabled.
+;
+; CHECK-LABEL: sammy:
+; CHECK: allocframe
+; CHECK: dealloc_return
+define i32 @sammy(i32 %a0, i32 %a1) local_unnamed_addr #1 {
+b2:
+ %v3 = alloca [32 x i32], align 8
+ %v4 = bitcast [32 x i32]* %v3 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %v4) #3
+ br label %b5
+
+b5: ; preds = %b5, %b2
+ %v6 = phi i32 [ 0, %b2 ], [ %v8, %b5 ]
+ %v7 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v6
+ store i32 %v6, i32* %v7, align 4
+ %v8 = add nuw nsw i32 %v6, 1
+ %v9 = icmp eq i32 %v8, 32
+ br i1 %v9, label %b10, label %b5
+
+b10: ; preds = %b5
+ %v11 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %a0
+ store i32 %a1, i32* %v11, align 4
+ br label %b12
+
+b12: ; preds = %b12, %b10
+ %v13 = phi i32 [ 0, %b10 ], [ %v18, %b12 ]
+ %v14 = phi i32 [ 0, %b10 ], [ %v17, %b12 ]
+ %v15 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v13
+ %v16 = load i32, i32* %v15, align 4
+ %v17 = add nsw i32 %v16, %v14
+ %v18 = add nuw nsw i32 %v13, 1
+ %v19 = icmp eq i32 %v18, 32
+ br i1 %v19, label %b20, label %b12
+
+b20: ; preds = %b12
+ call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %v4) #3
+ ret i32 %v17
+}
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+
+attributes #0 = { nounwind readnone "no-frame-pointer-elim"="false" "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone "no-frame-pointer-elim"="true" "target-cpu"="hexagonv60" }
+attributes #2 = { argmemonly nounwind }
+attributes #3 = { nounwind }
diff --git a/test/CodeGen/Hexagon/frame.ll b/test/CodeGen/Hexagon/frame.ll
deleted file mode 100644
index e87acb8cd796b..0000000000000
--- a/test/CodeGen/Hexagon/frame.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-
-@num = external global i32
-@acc = external global i32
-@num2 = external global i32
-
-; CHECK: allocframe
-; CHECK: dealloc_return
-
-define i32 @foo() nounwind {
-entry:
- %i = alloca i32, align 4
- %0 = load i32, i32* @num, align 4
- store i32 %0, i32* %i, align 4
- %1 = load i32, i32* %i, align 4
- %2 = load i32, i32* @acc, align 4
- %mul = mul nsw i32 %1, %2
- %3 = load i32, i32* @num2, align 4
- %add = add nsw i32 %mul, %3
- store i32 %add, i32* %i, align 4
- %4 = load i32, i32* %i, align 4
- ret i32 %4
-}
diff --git a/test/CodeGen/Hexagon/jt-in-text.ll b/test/CodeGen/Hexagon/jt-in-text.ll
new file mode 100644
index 0000000000000..62b5caef6aaa1
--- /dev/null
+++ b/test/CodeGen/Hexagon/jt-in-text.ll
@@ -0,0 +1,57 @@
+; RUN: llc -hexagon-emit-jt-text=true < %s | FileCheck %s
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+; CHECK: .text
+; CHECK-NOT: .rodata
+; CHECK: .word
+
+@lane0_pwr_st = global i32 0, align 4
+@lane1_pwr_st = global i32 0, align 4
+@lane2_pwr_st = global i32 0, align 4
+@lane3_pwr_st = global i32 0, align 4
+
+; Function Attrs: noinline nounwind
+define void @test2(i32 %lane_id, i32 %rx_pwr_st) #0 {
+entry:
+ %lane_id.addr = alloca i32, align 4
+ %rx_pwr_st.addr = alloca i32, align 4
+ store i32 %lane_id, i32* %lane_id.addr, align 4
+ store i32 %rx_pwr_st, i32* %rx_pwr_st.addr, align 4
+ %0 = load i32, i32* %lane_id.addr, align 4
+ switch i32 %0, label %sw.epilog [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 15, label %sw.bb4
+ ]
+
+sw.bb: ; preds = %entry
+ store i32 1, i32* @lane0_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb1: ; preds = %entry
+ store i32 1, i32* @lane1_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb2: ; preds = %entry
+ store i32 1, i32* @lane2_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb3: ; preds = %entry
+ store i32 1, i32* @lane3_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb4: ; preds = %entry
+ store i32 1, i32* @lane0_pwr_st, align 4
+ store i32 1, i32* @lane1_pwr_st, align 4
+ store i32 1, i32* @lane2_pwr_st, align 4
+ store i32 1, i32* @lane3_pwr_st, align 4
+ br label %sw.epilog
+
+sw.epilog: ; preds = %entry, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
+ ret void
+}
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/newvaluejump-kill2.mir b/test/CodeGen/Hexagon/newvaluejump-kill2.mir
new file mode 100644
index 0000000000000..565d07dc87ee6
--- /dev/null
+++ b/test/CodeGen/Hexagon/newvaluejump-kill2.mir
@@ -0,0 +1,18 @@
+# RUN: llc -march=hexagon -run-pass hexagon-nvj -verify-machineinstrs %s -o - | FileCheck %s
+# CHECK: J4_cmpgtu_t_jumpnv_t killed %r3, killed %r1, %bb.1, implicit-def %pc
+
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: %r0
+ %r1 = A2_addi %r0, -1
+ %r2 = A2_tfrsi -1431655765
+ %r3 = A2_tfrsi 2
+ %p0 = C2_cmpgtu killed %r3, %r1
+ %r2 = S4_subaddi killed %r1, 1, killed %r2
+ J2_jumpt killed %p0, %bb.1, implicit-def %pc
+ bb.1:
+...
diff --git a/test/CodeGen/Hexagon/newvaluejump2.ll b/test/CodeGen/Hexagon/newvaluejump2.ll
index 4c897f0830f37..fbc3f2925d19b 100644
--- a/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -6,7 +6,7 @@
@Reg = common global i32 0, align 4
define i32 @main() nounwind {
entry:
-; CHECK: if (cmp.gt(r{{[0-9]+}},r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
+; CHECK: if (cmp.gt(r{{[0-9]+}}.new,r{{[0-9]+}})) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
%Reg2 = alloca i32, align 4
%0 = load i32, i32* %Reg2, align 4
%1 = load i32, i32* @Reg, align 4
diff --git a/test/CodeGen/Hexagon/regalloc-liveout-undef.mir b/test/CodeGen/Hexagon/regalloc-liveout-undef.mir
new file mode 100644
index 0000000000000..6a41514b060e0
--- /dev/null
+++ b/test/CodeGen/Hexagon/regalloc-liveout-undef.mir
@@ -0,0 +1,35 @@
+# RUN: llc -march=hexagon -run-pass liveintervals -run-pass machineverifier -run-pass simple-register-coalescing %s -o - | FileCheck %s
+#
+# If there is no consumer of the live intervals, the live intervals pass
+# will be freed immediately after it runs, before the verifier. Add a
+# user (register coalescer in this case), so that the verification will
+# cover live intervals as well.
+#
+# Make sure that this compiles successfully.
+# CHECK: undef %1.isub_lo = A2_addi %1.isub_lo, 1
+
+---
+name: fred
+tracksRegLiveness: true
+
+registers:
+ - { id: 0, class: intregs }
+ - { id: 1, class: doubleregs }
+ - { id: 2, class: predregs }
+ - { id: 3, class: doubleregs }
+body: |
+ bb.0:
+ liveins: %d0
+ successors: %bb.1
+ %0 = IMPLICIT_DEF
+ %1 = COPY %d0
+
+ bb.1:
+ successors: %bb.1
+ %2 = C2_cmpgt %0, %1.isub_lo
+ %3 = COPY %1
+ %1 = COPY %3
+ undef %1.isub_lo = A2_addi %1.isub_lo, 1
+ J2_jump %bb.1, implicit-def %pc
+...
+
diff --git a/test/CodeGen/MIR/Generic/multiRunPass.mir b/test/CodeGen/MIR/Generic/multiRunPass.mir
index bd1c0d0b458e5..e055c44205b5e 100644
--- a/test/CodeGen/MIR/Generic/multiRunPass.mir
+++ b/test/CodeGen/MIR/Generic/multiRunPass.mir
@@ -7,7 +7,8 @@
# This test ensures that the command line accepts
# several run passes on the same command line and
# actually creates the proper pipeline for it.
-# PSEUDO_PEEPHOLE: -expand-isel-pseudos {{(-machineverifier )?}}-peephole-opt
+# PSEUDO_PEEPHOLE: -expand-isel-pseudos
+# PSEUDO_PEEPHOLE-SAME: {{(-machineverifier )?}}-peephole-opt
# PEEPHOLE_PSEUDO: -peephole-opt {{(-machineverifier )?}}-expand-isel-pseudos
# Make sure there are no other passes happening after what we asked.
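The two-line form above is equivalent to the single check it replaces: a FileCheck -SAME suffix requires its match to continue on the same line of output as the previous match, so PSEUDO_PEEPHOLE-SAME still pins -peephole-opt to the same pipeline line as -expand-isel-pseudos. For reference, an invocation of the shape this test exercises (a sketch only; the file's actual RUN lines sit above this hunk and are not shown in the diff):

# RUN: llc -run-pass expand-isel-pseudos -run-pass peephole-opt \
# RUN:   -debug-pass=Arguments -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=PSEUDO_PEEPHOLE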
diff --git a/test/CodeGen/Mips/2008-06-05-Carry.ll b/test/CodeGen/Mips/2008-06-05-Carry.ll
index 5e6092fc7848d..c61e1cdedea78 100644
--- a/test/CodeGen/Mips/2008-06-05-Carry.ll
+++ b/test/CodeGen/Mips/2008-06-05-Carry.ll
@@ -2,21 +2,20 @@
define i64 @add64(i64 %u, i64 %v) nounwind {
entry:
-; CHECK-LABEL: add64:
; CHECK: addu
-; CHECK-DAG: sltu
-; CHECK-DAG: addu
+; CHECK: sltu
; CHECK: addu
- %tmp2 = add i64 %u, %v
+; CHECK: addu
+ %tmp2 = add i64 %u, %v
ret i64 %tmp2
}
define i64 @sub64(i64 %u, i64 %v) nounwind {
entry:
-; CHECK-LABEL: sub64
-; CHECK-DAG: sltu
-; CHECK-DAG: subu
+; CHECK: sub64
; CHECK: subu
+; CHECK: sltu
+; CHECK: addu
; CHECK: subu
%tmp2 = sub i64 %u, %v
ret i64 %tmp2
diff --git a/test/CodeGen/Mips/dsp-patterns.ll b/test/CodeGen/Mips/dsp-patterns.ll
index 250d3eff37dc5..837c0d8bfc52b 100644
--- a/test/CodeGen/Mips/dsp-patterns.ll
+++ b/test/CodeGen/Mips/dsp-patterns.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dsp < %s | FileCheck %s -check-prefix=R1
-; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
+; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=R1
+; RUN: llc -march=mips -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
; R1-LABEL: test_lbux:
; R1: lbux ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/llcarry.ll b/test/CodeGen/Mips/llcarry.ll
index b7cc6fc8ea757..fcf129420234c 100644
--- a/test/CodeGen/Mips/llcarry.ll
+++ b/test/CodeGen/Mips/llcarry.ll
@@ -14,9 +14,9 @@ entry:
%add = add nsw i64 %1, %0
store i64 %add, i64* @k, align 8
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $24
+; 16: move ${{[0-9]+}}, $t8
+; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
ret void
}
@@ -28,8 +28,8 @@ entry:
%sub = sub nsw i64 %0, %1
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $24
-; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
+; 16: move ${{[0-9]+}}, $t8
+; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
store i64 %sub, i64* @l, align 8
ret void
@@ -41,7 +41,8 @@ entry:
%add = add nsw i64 %0, 15
; 16: addiu ${{[0-9]+}}, 15
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $24
+; 16: move ${{[0-9]+}}, $t8
+; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
store i64 %add, i64* @m, align 8
ret void
diff --git a/test/CodeGen/Mips/llvm-ir/add.ll b/test/CodeGen/Mips/llvm-ir/add.ll
index 63884eb03b8c5..a5ecdda94ce2f 100644
--- a/test/CodeGen/Mips/llvm-ir/add.ll
+++ b/test/CodeGen/Mips/llvm-ir/add.ll
@@ -1,35 +1,35 @@
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32,PRE4
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -O2 -verify-machineinstrs | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR3,MM32
+; RUN: -check-prefixes=ALL,MMR6,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -O2 | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -O2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM64
+; RUN: -check-prefixes=ALL,MMR6,MM64
; FIXME: This code sequence is inefficient as it should be 'subu $[[T0]], $zero, $[[T0]]'.
@@ -110,17 +110,17 @@ define signext i64 @add_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: add_i64:
- ; GP32-DAG: addu $[[T0:[0-9]+]], $4, $6
- ; GP32-DAG: addu $3, $5, $7
- ; GP32: sltu $[[T1:[0-9]+]], $3, $5
- ; GP32: addu $2, $[[T0]], $[[T1]]
+ ; GP32: addu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: addu $2, $4, $[[T1]]
; GP64: daddu $2, $4, $5
- ; MM32-DAG: addu16 $3, $5, $7
- ; MM32-DAG: addu16 $[[T0:[0-9]+]], $4, $6
- ; MM32: sltu $[[T1:[0-9]+]], $3, $5
- ; MM32: addu16 $2, $[[T0]], $[[T1]]
+ ; MM32: addu16 $3, $5, $7
+ ; MM32: sltu $[[T0:[0-9]+]], $3, $7
+ ; MM32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; MM32: addu $2, $4, $[[T1]]
; MM64: daddu $2, $4, $5
@@ -132,108 +132,49 @@ define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: add_i128:
- ; PRE4: move $[[R1:[0-9]+]], $5
- ; PRE4: move $[[R2:[0-9]+]], $4
- ; PRE4: lw $[[R3:[0-9]+]], 24($sp)
- ; PRE4: addu $[[R4:[0-9]+]], $6, $[[R3]]
- ; PRE4: lw $[[R5:[0-9]+]], 28($sp)
- ; PRE4: addu $[[R6:[0-9]+]], $7, $[[R5]]
- ; PRE4: sltu $[[R7:[0-9]+]], $[[R6]], $7
- ; PRE4: addu $[[R8:[0-9]+]], $[[R4]], $[[R7]]
- ; PRE4: xor $[[R9:[0-9]+]], $[[R8]], $6
- ; PRE4: sltiu $[[R10:[0-9]+]], $[[R9]], 1
- ; PRE4: bnez $[[R10]], $BB5_2
- ; PRE4: sltu $[[R7]], $[[R8]], $6
- ; PRE4: lw $[[R12:[0-9]+]], 20($sp)
- ; PRE4: addu $[[R13:[0-9]+]], $[[R1]], $[[R12]]
- ; PRE4: lw $[[R14:[0-9]+]], 16($sp)
- ; PRE4: addu $[[R15:[0-9]+]], $[[R13]], $[[R7]]
- ; PRE4: addu $[[R16:[0-9]+]], $[[R2]], $[[R14]]
- ; PRE4: sltu $[[R17:[0-9]+]], $[[R15]], $[[R13]]
- ; PRE4: sltu $[[R18:[0-9]+]], $[[R13]], $[[R1]]
- ; PRE4: addu $[[R19:[0-9]+]], $[[R16]], $[[R18]]
- ; PRE4: addu $2, $[[R19]], $[[R17]]
-
- ; GP32-CMOV: lw $[[T0:[0-9]+]], 24($sp)
- ; GP32-CMOV: addu $[[T1:[0-9]+]], $6, $[[T0]]
- ; GP32-CMOV: lw $[[T2:[0-9]+]], 28($sp)
- ; GP32-CMOV: addu $[[T3:[0-9]+]], $7, $[[T2]]
- ; GP32-CMOV: sltu $[[T4:[0-9]+]], $[[T3]], $7
- ; GP32-CMOV: addu $[[T5:[0-9]+]], $[[T1]], $[[T4]]
- ; GP32-CMOV: sltu $[[T6:[0-9]+]], $[[T5]], $6
- ; GP32-CMOV: xor $[[T7:[0-9]+]], $[[T5]], $6
- ; GP32-CMOV: movz $[[T8:[0-9]+]], $[[T4]], $[[T7]]
- ; GP32-CMOV: lw $[[T9:[0-9]+]], 20($sp)
- ; GP32-CMOV: addu $[[T10:[0-9]+]], $5, $[[T4]]
- ; GP32-CMOV: addu $[[T11:[0-9]+]], $[[T10]], $[[T8]]
- ; GP32-CMOV: lw $[[T12:[0-9]+]], 16($sp)
- ; GP32-CMOV: sltu $[[T13:[0-9]+]], $[[T11]], $[[T10]]
- ; GP32-CMOV: addu $[[T14:[0-9]+]], $4, $[[T12]]
- ; GP32-CMOV: sltu $[[T15:[0-9]+]], $[[T10]], $5
- ; GP32-CMOV: addu $[[T16:[0-9]+]], $[[T14]], $[[T15]]
- ; GP32-CMOV: addu $[[T17:[0-9]+]], $[[T16]], $[[T13]]
- ; GP32-CMOV: move $4, $[[T5]]
- ; GP32-CMOV: move $5, $[[T3]]
-
- ; GP64: daddu $[[T0:[0-9]+]], $4, $6
- ; GP64: daddu $[[T1:[0-9]+]], $5, $7
- ; GP64: sltu $[[T2:[0-9]+]], $[[T1]], $5
- ; GP64-NOT-R2-R6: dsll $[[T3:[0-9]+]], $[[T2]], 32
- ; GP64-NOT-R2-R6: dsrl $[[T4:[0-9]+]], $[[T3]], 32
- ; GP64-R2-R6: dext $[[T4:[0-9]+]], $[[T2]], 0, 32
-
- ; GP64: daddu $2, $[[T0]], $[[T4]]
-
- ; MMR3: move $[[T1:[0-9]+]], $5
- ; MMR3-DAG: lw $[[T2:[0-9]+]], 32($sp)
- ; MMR3: addu16 $[[T3:[0-9]+]], $6, $[[T2]]
- ; MMR3-DAG: lw $[[T4:[0-9]+]], 36($sp)
- ; MMR3: addu16 $[[T5:[0-9]+]], $7, $[[T4]]
- ; MMR3: sltu $[[T6:[0-9]+]], $[[T5]], $7
- ; MMR3: addu16 $[[T7:[0-9]+]], $[[T3]], $[[T6]]
- ; MMR3: sltu $[[T8:[0-9]+]], $[[T7]], $6
- ; MMR3: xor $[[T9:[0-9]+]], $[[T7]], $6
- ; MMR3: movz $[[T8]], $[[T6]], $[[T9]]
- ; MMR3: lw $[[T10:[0-9]+]], 28($sp)
- ; MMR3: addu16 $[[T11:[0-9]+]], $[[T1]], $[[T10]]
- ; MMR3: addu16 $[[T12:[0-9]+]], $[[T11]], $[[T8]]
- ; MMR3: lw $[[T13:[0-9]+]], 24($sp)
- ; MMR3: sltu $[[T14:[0-9]+]], $[[T12]], $[[T11]]
- ; MMR3: addu16 $[[T15:[0-9]+]], $4, $[[T13]]
- ; MMR3: sltu $[[T16:[0-9]+]], $[[T11]], $[[T1]]
- ; MMR3: addu16 $[[T17:[0-9]+]], $[[T15]], $[[T16]]
- ; MMR3: addu16 $2, $2, $[[T14]]
-
- ; MMR6: move $[[T1:[0-9]+]], $5
- ; MMR6: move $[[T2:[0-9]+]], $4
- ; MMR6: lw $[[T3:[0-9]+]], 32($sp)
- ; MMR6: addu16 $[[T4:[0-9]+]], $6, $[[T3]]
- ; MMR6: lw $[[T5:[0-9]+]], 36($sp)
- ; MMR6: addu16 $[[T6:[0-9]+]], $7, $[[T5]]
- ; MMR6: sltu $[[T7:[0-9]+]], $[[T6]], $7
- ; MMR6: addu16 $[[T8:[0-9]+]], $[[T4]], $7
- ; MMR6: sltu $[[T9:[0-9]+]], $[[T8]], $6
- ; MMR6: xor $[[T10:[0-9]+]], $[[T4]], $6
- ; MMR6: sltiu $[[T11:[0-9]+]], $[[T10]], 1
- ; MMR6: seleqz $[[T12:[0-9]+]], $[[T9]], $[[T11]]
- ; MMR6: selnez $[[T13:[0-9]+]], $[[T7]], $[[T11]]
- ; MMR6: lw $[[T14:[0-9]+]], 24($sp)
- ; MMR6: or $[[T15:[0-9]+]], $[[T13]], $[[T12]]
- ; MMR6: addu16 $[[T16:[0-9]+]], $[[T2]], $[[T14]]
- ; MMR6: lw $[[T17:[0-9]+]], 28($sp)
- ; MMR6: addu16 $[[T18:[0-9]+]], $[[T1]], $[[T17]]
- ; MMR6: addu16 $[[T19:[0-9]+]], $[[T18]], $[[T15]]
- ; MMR6: sltu $[[T20:[0-9]+]], $[[T18]], $[[T1]]
- ; MMR6: sltu $[[T21:[0-9]+]], $[[T17]], $[[T18]]
- ; MMR6: addu16 $2, $[[T16]], $[[T20]]
- ; MMR6: addu16 $2, $[[T20]], $[[T21]]
-
- ; MM64: daddu $[[T0:[0-9]+]], $4, $6
+ ; GP32: lw $[[T0:[0-9]+]], 28($sp)
+ ; GP32: addu $[[T1:[0-9]+]], $7, $[[T0]]
+ ; GP32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 24($sp)
+ ; GP32: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP32: addu $[[T5:[0-9]+]], $6, $[[T4]]
+ ; GP32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; GP32: lw $[[T7:[0-9]+]], 20($sp)
+ ; GP32: addu $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+ ; GP32: lw $[[T9:[0-9]+]], 16($sp)
+ ; GP32: addu $3, $5, $[[T8]]
+ ; GP32: sltu $[[T10:[0-9]+]], $3, $[[T7]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T9]]
+ ; GP32: addu $2, $4, $[[T11]]
+ ; GP32: move $4, $[[T5]]
+ ; GP32: move $5, $[[T1]]
+
+ ; GP64: daddu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: daddu $2, $4, $[[T1]]
+
+ ; MM32: lw $[[T0:[0-9]+]], 28($sp)
+ ; MM32: addu $[[T1:[0-9]+]], $7, $[[T0]]
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; MM32: lw $[[T3:[0-9]+]], 24($sp)
+ ; MM32: addu16 $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; MM32: addu16 $[[T5:[0-9]+]], $6, $[[T4]]
+ ; MM32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; MM32: lw $[[T7:[0-9]+]], 20($sp)
+ ; MM32: addu16 $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+ ; MM32: lw $[[T9:[0-9]+]], 16($sp)
+ ; MM32: addu16 $[[T10:[0-9]+]], $5, $[[T8]]
+ ; MM32: sltu $[[T11:[0-9]+]], $[[T10]], $[[T7]]
+ ; MM32: addu $[[T12:[0-9]+]], $[[T11]], $[[T9]]
+ ; MM32: addu16 $[[T13:[0-9]+]], $4, $[[T12]]
+ ; MM32: move $4, $[[T5]]
+ ; MM32: move $5, $[[T1]]
+
; MM64: daddu $3, $5, $7
- ; MM64: sltu $[[T1:[0-9]+]], $3, $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $[[T0]], $[[T3]]
+ ; MM64: sltu $[[T0:[0-9]+]], $3, $7
+ ; MM64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; MM64: daddu $2, $4, $[[T1]]
%r = add i128 %a, %b
ret i128 %r
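
The GP32 and MM32 sequences above are the usual ripple-carry expansion of a multi-word add: each `addu` is followed by an `sltu` that recovers the carry, since a 32-bit sum wraps exactly when it is unsigned-less-than either addend. A minimal hand-written IR sketch of one carry step (illustrative only; the function and value names are assumptions, not part of the test):

define { i32, i32 } @add64_by_words(i32 %alo, i32 %ahi, i32 %blo, i32 %bhi) {
entry:
  %lo = add i32 %alo, %blo
  %carry = icmp ult i32 %lo, %blo        ; sltu: the sum wrapped iff it is below an addend
  %carry32 = zext i1 %carry to i32
  %hisum = add i32 %ahi, %bhi
  %hi = add i32 %hisum, %carry32         ; propagate the carry into the next word
  %r0 = insertvalue { i32, i32 } undef, i32 %lo, 0
  %r1 = insertvalue { i32, i32 } %r0, i32 %hi, 1
  ret { i32, i32 } %r1
}
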
@@ -308,16 +249,17 @@ define signext i32 @add_i32_4(i32 signext %a) {
define signext i64 @add_i64_4(i64 signext %a) {
; ALL-LABEL: add_i64_4:
- ; GP32: addiu $3, $5, 4
- ; GP32: sltu $[[T0:[0-9]+]], $3, $5
- ; GP32: addu $2, $4, $[[T0]]
-
- ; MM32: addiur2 $[[T1:[0-9]+]], $5, 4
- ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
- ; MM32: addu16 $2, $4, $[[T2]]
+ ; GP32: addiu $[[T0:[0-9]+]], $5, 4
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 4
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: addu $2, $4, $[[T1]]
; GP64: daddiu $2, $4, 4
+ ; MM32: addiu $[[T0:[0-9]+]], $5, 4
+ ; MM32: li16 $[[T1:[0-9]+]], 4
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; MM32: addu $2, $4, $[[T2]]
; MM64: daddiu $2, $4, 4
@@ -328,67 +270,38 @@ define signext i64 @add_i64_4(i64 signext %a) {
define signext i128 @add_i128_4(i128 signext %a) {
; ALL-LABEL: add_i128_4:
- ; PRE4: move $[[T0:[0-9]+]], $5
- ; PRE4: addiu $[[T1:[0-9]+]], $7, 4
- ; PRE4: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; PRE4: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; PRE4: bnez $[[T3]], $BB[[BB0:[0-9_]+]]
- ; PRE4: addu $[[T4:[0-9]+]], $6, $[[T2]]
- ; PRE4: sltu $[[T5:[0-9]+]], $[[T4]], $6
- ; PRE4: $BB[[BB0:[0-9]+]]:
- ; PRE4: addu $[[T6:[0-9]+]], $[[T0]], $[[T5]]
- ; PRE4: sltu $[[T7:[0-9]+]], $[[T6]], $[[T0]]
- ; PRE4: addu $[[T8:[0-9]+]], $4, $[[T7]]
- ; PRE4: move $4, $[[T4]]
-
- ; GP32-CMOV: addiu $[[T0:[0-9]+]], $7, 4
- ; GP32-CMOV: sltu $[[T1:[0-9]+]], $[[T0]], $7
- ; GP32-CMOV: addu $[[T2:[0-9]+]], $6, $[[T1]]
- ; GP32-CMOV: sltu $[[T3:[0-9]+]], $[[T2]], $6
- ; GP32-CMOV: movz $[[T3]], $[[T1]], $[[T1]]
- ; GP32-CMOV: addu $[[T4:[0-9]+]], $5, $[[T3]]
- ; GP32-CMOV: sltu $[[T5:[0-9]+]], $[[T4]], $5
- ; GP32-CMOV: addu $[[T7:[0-9]+]], $4, $[[T5]]
- ; GP32-CMOV: move $4, $[[T2]]
- ; GP32-CMOV: move $5, $[[T0]]
-
- ; GP64: daddiu $[[T0:[0-9]+]], $5, 4
- ; GP64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; GP64-NOT-R2-R6: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; GP64-R2-R6: dext $[[T3:[0-9]+]], $[[T1]], 0, 32
-
- ; GP64: daddu $2, $4, $[[T3]]
-
- ; MMR3: addiur2 $[[T0:[0-9]+]], $7, 4
- ; MMR3: sltu $[[T1:[0-9]+]], $[[T0]], $7
- ; MMR3: sltu $[[T2:[0-9]+]], $[[T0]], $7
- ; MMR3: addu16 $[[T3:[0-9]+]], $6, $[[T2]]
- ; MMR3: sltu $[[T4:[0-9]+]], $[[T3]], $6
- ; MMR3: movz $[[T4]], $[[T2]], $[[T1]]
- ; MMR3: addu16 $[[T6:[0-9]+]], $5, $[[T4]]
- ; MMR3: sltu $[[T7:[0-9]+]], $[[T6]], $5
- ; MMR3: addu16 $2, $4, $[[T7]]
-
- ; MMR6: addiur2 $[[T1:[0-9]+]], $7, 4
- ; MMR6: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; MMR6: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; MMR6: selnez $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; MMR6: addu16 $[[T5:[0-9]+]], $6, $[[T2]]
- ; MMR6: sltu $[[T6:[0-9]+]], $[[T5]], $6
- ; MMR6: seleqz $[[T7:[0-9]+]], $[[T6]], $[[T3]]
- ; MMR6: or $[[T8:[0-9]+]], $[[T4]], $[[T7]]
- ; MMR6: addu16 $[[T9:[0-9]+]], $5, $[[T8]]
- ; MMR6: sltu $[[T10:[0-9]+]], $[[T9]], $5
- ; MMR6: addu16 $[[T11:[0-9]+]], $4, $[[T10]]
- ; MMR6: move $4, $7
- ; MMR6: move $5, $[[T1]]
+ ; GP32: addiu $[[T0:[0-9]+]], $7, 4
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 4
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: addu $[[T2:[0-9]+]], $6, $[[T1]]
+ ; GP32: sltu $[[T1]], $[[T2]], $zero
+ ; GP32: addu $[[T3:[0-9]+]], $5, $[[T1]]
+ ; GP32: sltu $[[T1]], $[[T3]], $zero
+ ; GP32: addu $[[T1]], $4, $[[T1]]
+ ; GP32: move $4, $[[T2]]
+ ; GP32: move $5, $[[T0]]
+
+ ; GP64: daddiu $[[T0:[0-9]+]], $5, 4
+ ; GP64: daddiu $[[T1:[0-9]+]], $zero, 4
+ ; GP64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP64: daddu $2, $4, $[[T1]]
+
+ ; MM32: addiu $[[T0:[0-9]+]], $7, 4
+ ; MM32: li16 $[[T1:[0-9]+]], 4
+ ; MM32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM32: addu16 $[[T2:[0-9]+]], $6, $[[T1]]
+ ; MM32: li16 $[[T1]], 0
+ ; MM32: sltu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; MM32: addu16 $[[T3]], $5, $[[T3]]
+ ; MM32: sltu $[[T1]], $[[T3]], $[[T1]]
+ ; MM32: addu16 $[[T1]], $4, $[[T1]]
+ ; MM32: move $4, $[[T2]]
+ ; MM32: move $5, $[[T0]]
; MM64: daddiu $[[T0:[0-9]+]], $5, 4
- ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $4, $[[T3]]
+ ; MM64: daddiu $[[T1:[0-9]+]], $zero, 4
+ ; MM64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM64: daddu $2, $4, $[[T1]]
%r = add i128 4, %a
ret i128 %r
@@ -467,15 +380,16 @@ define signext i64 @add_i64_3(i64 signext %a) {
; ALL-LABEL: add_i64_3:
; GP32: addiu $[[T0:[0-9]+]], $5, 3
- ; GP32: sltu $[[T1:[0-9]+]], $[[T0]], $5
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 3
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
; GP32: addu $2, $4, $[[T1]]
; GP64: daddiu $2, $4, 3
- ; MM32: move $[[T1:[0-9]+]], $5
- ; MM32: addius5 $[[T1]], 3
- ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
- ; MM32: addu16 $2, $4, $[[T2]]
+ ; MM32: addiu $[[T0:[0-9]+]], $5, 3
+ ; MM32: li16 $[[T1:[0-9]+]], 3
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; MM32: addu $2, $4, $[[T2]]
; MM64: daddiu $2, $4, 3
@@ -486,70 +400,38 @@ define signext i64 @add_i64_3(i64 signext %a) {
define signext i128 @add_i128_3(i128 signext %a) {
; ALL-LABEL: add_i128_3:
- ; PRE4: move $[[T0:[0-9]+]], $5
- ; PRE4: addiu $[[T1:[0-9]+]], $7, 3
- ; PRE4: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; PRE4: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; PRE4: bnez $[[T3]], $BB[[BB0:[0-9_]+]]
- ; PRE4: addu $[[T4:[0-9]+]], $6, $[[T2]]
- ; PRE4: sltu $[[T5:[0-9]+]], $[[T4]], $6
- ; PRE4: $BB[[BB0:[0-9]+]]:
- ; PRE4: addu $[[T6:[0-9]+]], $[[T0]], $[[T5]]
- ; PRE4: sltu $[[T7:[0-9]+]], $[[T6]], $[[T0]]
- ; PRE4: addu $[[T8:[0-9]+]], $4, $[[T7]]
- ; PRE4: move $4, $[[T4]]
-
- ; GP32-CMOV: addiu $[[T0:[0-9]+]], $7, 3
- ; GP32-CMOV: sltu $[[T1:[0-9]+]], $[[T0]], $7
- ; GP32-CMOV: addu $[[T2:[0-9]+]], $6, $[[T1]]
- ; GP32-CMOV: sltu $[[T3:[0-9]+]], $[[T2]], $6
- ; GP32-CMOV: movz $[[T3]], $[[T1]], $[[T1]]
- ; GP32-CMOV: addu $[[T4:[0-9]+]], $5, $[[T3]]
- ; GP32-CMOV: sltu $[[T5:[0-9]+]], $[[T4]], $5
- ; GP32-CMOV: addu $[[T7:[0-9]+]], $4, $[[T5]]
- ; GP32-CMOV: move $4, $[[T2]]
- ; GP32-CMOV: move $5, $[[T0]]
-
- ; GP64: daddiu $[[T0:[0-9]+]], $5, 3
- ; GP64: sltu $[[T1:[0-9]+]], $[[T0]], $5
-
- ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; GP64-NOT-R2-R6: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; GP64-R2-R6: dext $[[T3:[0-9]+]], $[[T1]], 0, 32
-
- ; GP64: daddu $2, $4, $[[T3]]
-
- ; MMR3: move $[[T1:[0-9]+]], $7
- ; MMR3: addius5 $[[T1]], 3
- ; MMR3: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; MMR3: sltu $[[T3:[0-9]+]], $[[T1]], $7
- ; MMR3: addu16 $[[T4:[0-9]+]], $6, $[[T3]]
- ; MMR3: sltu $[[T5:[0-9]+]], $[[T4]], $6
- ; MMR3: movz $[[T5]], $[[T3]], $[[T2]]
- ; MMR3: addu16 $[[T6:[0-9]+]], $5, $[[T5]]
- ; MMR3: sltu $[[T7:[0-9]+]], $[[T6]], $5
- ; MMR3: addu16 $2, $4, $[[T7]]
-
- ; MMR6: move $[[T1:[0-9]+]], $7
- ; MMR6: addius5 $[[T1]], 3
- ; MMR6: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; MMR6: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; MMR6: selnez $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; MMR6: addu16 $[[T5:[0-9]+]], $6, $[[T2]]
- ; MMR6: sltu $[[T6:[0-9]+]], $[[T5]], $6
- ; MMR6: seleqz $[[T7:[0-9]+]], $[[T6]], $[[T3]]
- ; MMR6: or $[[T8:[0-9]+]], $[[T4]], $[[T7]]
- ; MMR6: addu16 $[[T9:[0-9]+]], $5, $[[T8]]
- ; MMR6: sltu $[[T10:[0-9]+]], $[[T9]], $5
- ; MMR6: addu16 $[[T11:[0-9]+]], $4, $[[T10]]
- ; MMR6: move $4, $[[T5]]
- ; MMR6: move $5, $[[T1]]
+ ; GP32: addiu $[[T0:[0-9]+]], $7, 3
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 3
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: addu $[[T2:[0-9]+]], $6, $[[T1]]
+ ; GP32: sltu $[[T3:[0-9]+]], $[[T2]], $zero
+ ; GP32: addu $[[T4:[0-9]+]], $5, $[[T3]]
+ ; GP32: sltu $[[T5:[0-9]+]], $[[T4]], $zero
+ ; GP32: addu $[[T5]], $4, $[[T5]]
+ ; GP32: move $4, $[[T2]]
+ ; GP32: move $5, $[[T0]]
+
+ ; GP64: daddiu $[[T0:[0-9]+]], $5, 3
+ ; GP64: daddiu $[[T1:[0-9]+]], $zero, 3
+ ; GP64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP64: daddu $2, $4, $[[T1]]
+
+ ; MM32: addiu $[[T0:[0-9]+]], $7, 3
+ ; MM32: li16 $[[T1:[0-9]+]], 3
+ ; MM32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM32: addu16 $[[T2:[0-9]+]], $6, $[[T1]]
+ ; MM32: li16 $[[T3:[0-9]+]], 0
+ ; MM32: sltu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; MM32: addu16 $[[T4]], $5, $[[T4]]
+ ; MM32: sltu $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+ ; MM32: addu16 $[[T5]], $4, $[[T5]]
+ ; MM32: move $4, $[[T2]]
+ ; MM32: move $5, $[[T0]]
; MM64: daddiu $[[T0:[0-9]+]], $5, 3
- ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $4, $[[T3]]
+ ; MM64: daddiu $[[T1:[0-9]+]], $zero, 3
+ ; MM64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM64: daddu $2, $4, $[[T1]]
%r = add i128 3, %a
ret i128 %r
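
The constant-operand variants above use a related identity: for a nonzero constant k, the 32-bit sum x + k wraps exactly when (x + k) mod 2^32 < k, so the carry can be computed by materializing k (`li16` or `addiu $zero, k`) and comparing the sum against it rather than against the variable operand. A small sketch of that check (function name assumed):

define i1 @carry_of_add4(i32 %x) {
entry:
  %sum = add i32 %x, 4
  %carry = icmp ult i32 %sum, 4    ; wraps iff %x > 0xFFFFFFFB
  ret i1 %carry
}
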
diff --git a/test/CodeGen/Mips/llvm-ir/sub.ll b/test/CodeGen/Mips/llvm-ir/sub.ll
index 655addb10a64e..a730063c552f4 100644
--- a/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM,PRE4
+; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
@@ -11,25 +11,25 @@
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN: -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -verify-machineinstrs | FileCheck %s \
-; RUN: -check-prefixes=GP32-MM,GP32,MM32,MMR3
+; RUN: -check-prefixes=GP32-MM,GP32,MM
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP32-MM,GP32,MM32,MMR6
+; RUN: -check-prefixes=GP32-MM,GP32,MM
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP64,MM64
+; RUN: -check-prefixes=GP64,MM
define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -100,15 +100,10 @@ define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: sub_i64:
- ; GP32-NOT-MM: sltu $[[T0:[0-9]+]], $5, $7
- ; GP32-NOT-MM: subu $2, $4, $6
- ; GP32-NOT-MM: subu $2, $2, $[[T0]]
- ; GP32-NOT-MM: subu $3, $5, $7
-
- ; MM32: sltu $[[T0:[0-9]+]], $5, $7
- ; MM32: subu16 $3, $4, $6
- ; MM32: subu16 $2, $3, $[[T0]]
- ; MM32: subu16 $3, $5, $7
+ ; GP32-NOT-MM: subu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: subu $2, $4, $[[T1]]
; GP64: dsubu $2, $4, $5
@@ -120,109 +115,42 @@ define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: sub_i128:
-; PRE4: lw $[[T0:[0-9]+]], 24($sp)
-; PRE4: lw $[[T1:[0-9]+]], 28($sp)
-; PRE4: sltu $[[T2:[0-9]+]], $7, $[[T1]]
-; PRE4: xor $[[T3:[0-9]+]], $6, $[[T0]]
-; PRE4: sltiu $[[T4:[0-9]+]], $[[T3]], 1
-; PRE4: bnez $[[T4]]
-; PRE4: move $[[T5:[0-9]+]], $[[T2]]
-; PRE4: sltu $[[T5]], $6, $[[T0]]
-
-; PRE4: lw $[[T6:[0-9]+]], 20($sp)
-; PRE4: subu $[[T7:[0-9]+]], $5, $[[T6]]
-; PRE4: subu $[[T8:[0-9]+]], $[[T7]], $[[T5]]
-; PRE4: sltu $[[T9:[0-9]+]], $[[T7]], $[[T5]]
-; PRE4: sltu $[[T10:[0-9]+]], $5, $[[T6]]
-; PRE4: lw $[[T11:[0-9]+]], 16($sp)
-; PRE4: subu $[[T12:[0-9]+]], $4, $[[T11]]
-; PRE4: subu $[[T13:[0-9]+]], $[[T12]], $[[T10]]
-; PRE4: subu $[[T14:[0-9]+]], $[[T13]], $[[T9]]
-; PRE4: subu $[[T15:[0-9]+]], $6, $[[T0]]
-; PRE4: subu $[[T16:[0-9]+]], $[[T15]], $[[T2]]
-; PRE4: subu $5, $7, $[[T1]]
-
-; MMR3: lw $[[T1:[0-9]+]], 48($sp)
-; MMR3: sltu $[[T2:[0-9]+]], $6, $[[T1]]
-; MMR3: xor $[[T3:[0-9]+]], $6, $[[T1]]
-; MMR3: lw $[[T4:[0-9]+]], 52($sp)
-; MMR3: sltu $[[T5:[0-9]+]], $7, $[[T4]]
-; MMR3: movz $[[T6:[0-9]+]], $[[T5]], $[[T3]]
-; MMR3: lw $[[T7:[0-8]+]], 44($sp)
-; MMR3: subu16 $[[T8:[0-9]+]], $5, $[[T7]]
-; MMR3: subu16 $[[T9:[0-9]+]], $[[T8]], $[[T6]]
-; MMR3: sltu $[[T10:[0-9]+]], $[[T8]], $[[T2]]
-; MMR3: sltu $[[T11:[0-9]+]], $5, $[[T7]]
-; MMR3: lw $[[T12:[0-9]+]], 40($sp)
-; MMR3: lw $[[T13:[0-9]+]], 12($sp)
-; MMR3: subu16 $[[T14:[0-9]+]], $[[T13]], $[[T12]]
-; MMR3: subu16 $[[T15:[0-9]+]], $[[T14]], $[[T11]]
-; MMR3: subu16 $[[T16:[0-9]+]], $[[T15]], $[[T10]]
-; MMR3: subu16 $[[T17:[0-9]+]], $6, $[[T1]]
-; MMR3: subu16 $[[T18:[0-9]+]], $[[T17]], $7
-; MMR3: lw $[[T19:[0-9]+]], 8($sp)
-; MMR3: lw $[[T20:[0-9]+]], 0($sp)
-; MMR3: subu16 $5, $[[T19]], $[[T20]]
-
-; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $[[T0]], 8($sp)
-; MMR6: move $[[T1:[0-9]+]], $5
-; MMR6: sw $4, 12($sp)
-; MMR6: lw $[[T2:[0-9]+]], 48($sp)
-; MMR6: sltu $[[T3:[0-9]+]], $6, $[[T2]]
-; MMR6: xor $[[T4:[0-9]+]], $6, $[[T2]]
-; MMR6: sltiu $[[T5:[0-9]+]], $[[T4]], 1
-; MMR6: seleqz $[[T6:[0-9]+]], $[[T3]], $[[T5]]
-; MMR6: lw $[[T7:[0-9]+]], 52($sp)
-; MMR6: sltu $[[T8:[0-9]+]], $[[T0]], $[[T7]]
-; MMR6: selnez $[[T9:[0-9]+]], $[[T8]], $[[T5]]
-; MMR6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]]
-; MMR6: lw $[[T11:[0-9]+]], 44($sp)
-; MMR6: subu16 $[[T12:[0-9]+]], $[[T1]], $[[T11]]
-; MMR6: subu16 $[[T13:[0-9]+]], $[[T12]], $[[T7]]
-; MMR6: sltu $[[T16:[0-9]+]], $[[T12]], $[[T7]]
-; MMR6: sltu $[[T17:[0-9]+]], $[[T1]], $[[T11]]
-; MMR6: lw $[[T18:[0-9]+]], 40($sp)
-; MMR6: lw $[[T19:[0-9]+]], 12($sp)
-; MMR6: subu16 $[[T20:[0-9]+]], $[[T19]], $[[T18]]
-; MMR6: subu16 $[[T21:[0-9]+]], $[[T20]], $[[T17]]
-; MMR6: subu16 $[[T22:[0-9]+]], $[[T21]], $[[T16]]
-; MMR6: subu16 $[[T23:[0-9]+]], $6, $[[T2]]
-; MMR6: subu16 $4, $[[T23]], $5
-; MMR6: lw $[[T24:[0-9]+]], 8($sp)
-; MMR6: lw $[[T25:[0-9]+]], 0($sp)
-; MMR6: subu16 $5, $[[T24]], $[[T25]]
-; MMR6: lw $3, 4($sp)
-
-; FIXME: The sltu, dsll, dsrl pattern here occurs when an i32 is zero
-; extended to 64 bits. Fortunately slt(i)(u) actually gives an i1.
-; These should be combined away.
-
-; GP64-NOT-R2: dsubu $1, $4, $6
-; GP64-NOT-R2: sltu $[[T0:[0-9]+]], $5, $7
-; GP64-NOT-R2: dsll $[[T1:[0-9]+]], $[[T0]], 32
-; GP64-NOT-R2: dsrl $[[T2:[0-9]+]], $[[T1]], 32
-; GP64-NOT-R2: dsubu $2, $1, $[[T2]]
-; GP64-NOT-R2: dsubu $3, $5, $7
-
-; FIXME: Likewise for the sltu, dext here.
-
-; GP64-R2: dsubu $1, $4, $6
-; GP64-R2: sltu $[[T0:[0-9]+]], $5, $7
-; GP64-R2: dext $[[T1:[0-9]+]], $[[T0]], 0, 32
-; GP64-R2: dsubu $2, $1, $[[T1]]
-; GP64-R2: dsubu $3, $5, $7
-
-; FIXME: Again, redundant sign extension. Also, microMIPSR6 has the
-; dext instruction which should be used here.
-
-; MM64: dsubu $[[T0:[0-9]+]], $4, $6
-; MM64: sltu $[[T1:[0-9]+]], $5, $7
-; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
-; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
-; MM64: dsubu $2, $[[T0]], $[[T3]]
-; MM64: dsubu $3, $5, $7
-; MM64: jr $ra
+ ; GP32-NOT-MM: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32-NOT-MM: sltu $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP32-NOT-MM: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32-NOT-MM: addu $[[T3:[0-9]+]], $[[T1]], $[[T2]]
+ ; GP32-NOT-MM: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32-NOT-MM: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32-NOT-MM: subu $[[T6:[0-9]+]], $7, $[[T5]]
+ ; GP32-NOT-MM: subu $2, $4, $[[T3]]
+ ; GP32-NOT-MM: sltu $[[T8:[0-9]+]], $6, $[[T4]]
+ ; GP32-NOT-MM: addu $[[T9:[0-9]+]], $[[T8]], $[[T0]]
+ ; GP32-NOT-MM: subu $3, $5, $[[T9]]
+ ; GP32-NOT-MM: sltu $[[T10:[0-9]+]], $7, $[[T5]]
+ ; GP32-NOT-MM: addu $[[T11:[0-9]+]], $[[T10]], $[[T4]]
+ ; GP32-NOT-MM: subu $4, $6, $[[T11]]
+ ; GP32-NOT-MM: move $5, $[[T6]]
+
+ ; GP32-MM: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32-MM: sltu $[[T1:[0-9]+]], $[[T2:[0-9]+]], $[[T0]]
+ ; GP32-MM: lw $[[T3:[0-9]+]], 16($sp)
+ ; GP32-MM: addu $[[T3]], $[[T1]], $[[T3]]
+ ; GP32-MM: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32-MM: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32-MM: subu $[[T1]], $7, $[[T5]]
+ ; GP32-MM: subu16 $[[T3]], $[[T6:[0-9]+]], $[[T3]]
+ ; GP32-MM: sltu $[[T6]], $6, $[[T4]]
+ ; GP32-MM: addu16 $[[T0]], $[[T6]], $[[T0]]
+ ; GP32-MM: subu16 $[[T0]], $5, $[[T0]]
+ ; GP32-MM: sltu $[[T6]], $7, $[[T5]]
+ ; GP32-MM: addu $[[T6]], $[[T6]], $[[T4]]
+ ; GP32-MM: subu16 $[[T6]], $6, $[[T6]]
+ ; GP32-MM: move $[[T2]], $[[T1]]
+
+ ; GP64: dsubu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: dsubu $2, $4, $[[T1]]
%r = sub i128 %a, %b
ret i128 %r
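
The new GP32 sub_i128 sequence needs only one `subu` per high word because it first folds the borrow into the subtrahend with an `addu` of the `sltu` result, where the old code subtracted twice. A hand-written sketch of one borrow step (the word splitting and names are assumed, not from the test):

define i32 @sub_hi_word(i32 %alo, i32 %ahi, i32 %blo, i32 %bhi) {
entry:
  %borrow = icmp ult i32 %alo, %blo    ; sltu: the low word needs a borrow
  %borrow32 = zext i1 %borrow to i32
  %bhi.adj = add i32 %bhi, %borrow32   ; fold the borrow into the subtrahend
  %hi = sub i32 %ahi, %bhi.adj         ; one subu instead of two
  ret i32 %hi
}
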
diff --git a/test/CodeGen/Mips/madd-msub.ll b/test/CodeGen/Mips/madd-msub.ll
index 3e1a2e8b97088..7baba005a0729 100644
--- a/test/CodeGen/Mips/madd-msub.ll
+++ b/test/CodeGen/Mips/madd-msub.ll
@@ -25,11 +25,11 @@
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-; 32R6-DAG: muh $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sra $[[T4:[0-9]+]], $6, 31
-; 32R6-DAG: addu $[[T5:[0-9]+]], $[[T3]], $[[T4]]
-; 32R6-DAG: addu $2, $[[T5]], $[[T2]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -71,7 +71,7 @@ entry:
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
; FIXME: There's a redundant move here. We should remove it
; 32R6-DAG: muhu $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $2, $[[T3]], $[[T2]]
@@ -109,10 +109,10 @@ entry:
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $7
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $1
-; 32R6-DAG: muh $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $6
-; 32R6-DAG: addu $2, $[[T4]], $[[T2]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $7
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $6
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -134,17 +134,6 @@ entry:
ret i64 %add
}
-; ALL-LABEL: madd4
-; ALL-NOT: madd ${{[0-9]+}}, ${{[0-9]+}}
-
-define i32 @madd4(i32 %a, i32 %b, i32 %c) {
-entry:
- %mul = mul nsw i32 %a, %b
- %add = add nsw i32 %c, %mul
-
- ret i32 %add
-}
-
; ALL-LABEL: msub1:
; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
@@ -159,13 +148,13 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T1:[0-9]+]], $6, $[[T0]]
-; 32R6-DAG: muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
-; 32R6-DAG: subu $[[T4:[0-9]+]], $[[T3]], $[[T2]]
-; 32R6-DAG: subu $2, $[[T4]], $[[T1]]
-; 32R6-DAG: subu $3, $6, $[[T0]]
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T3:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+; 32R6-DAG: sra $[[T5:[0-9]+]], $6, 31
+; 32R6-DAG: subu $2, $[[T5]], $[[T4]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -205,12 +194,13 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T1:[0-9]+]], $6, $[[T0]]
-; 32R6-DAG: muhu $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: negu $[[T3:[0-9]+]], $[[T2]]
-; 32R6-DAG: subu $2, $[[T3]], $[[T1]]
-; 32R6-DAG: subu $3, $6, $[[T0]]
+; 32R6-DAG: muhu $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: negu $2, $[[T3]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
; 64-DAG: d[[m:m]]ult $5, $4
; 64-DAG: [[m]]flo $[[T0:[0-9]+]]
@@ -244,12 +234,12 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T1:[0-9]+]], $7, $[[T0]]
-; 32R6-DAG: muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: subu $[[T3:[0-9]+]], $6, $[[T2]]
-; 32R6-DAG: subu $2, $[[T3]], $[[T1]]
-; 32R6-DAG: subu $3, $7, $[[T0]]
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $7, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: subu $2, $6, $[[T3]]
+; 32R6-DAG: subu $3, $7, $[[T1]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -270,14 +260,3 @@ entry:
%sub = sub nsw i64 %c, %mul
ret i64 %sub
}
-
-; ALL-LABEL: msub4
-; ALL-NOT: msub ${{[0-9]+}}, ${{[0-9]+}}
-
-define i32 @msub4(i32 %a, i32 %b, i32 %c) {
-entry:
- %mul = mul nsw i32 %a, %b
- %sub = sub nsw i32 %c, %mul
-
- ret i32 %sub
-}
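
The `sltu` operand swaps in the madd/msub checks above are behavior-preserving: for a sum c = a + b, the comparisons c < a and c < b both read back the same carry bit, as noted for the add tests earlier. A sketch demonstrating the equivalence (function name assumed; `%same` is 1 for every input):

define i1 @carry_forms_agree(i32 %lo, i32 %addend) {
entry:
  %sum = add i32 %lo, %addend
  %c.old = icmp ult i32 %sum, %lo       ; old check: sum vs. the product low word
  %c.new = icmp ult i32 %sum, %addend   ; new check: sum vs. the addend
  %same = icmp eq i1 %c.old, %c.new     ; both detect the same wraparound
  ret i1 %same
}
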
diff --git a/test/CodeGen/NVPTX/lower-aggr-copies.ll b/test/CodeGen/NVPTX/lower-aggr-copies.ll
index 192d4becb0596..f522c6722ee6f 100644
--- a/test/CodeGen/NVPTX/lower-aggr-copies.ll
+++ b/test/CodeGen/NVPTX/lower-aggr-copies.ll
@@ -17,6 +17,8 @@ entry:
ret i8* %dst
; IR-LABEL: @memcpy_caller
+; IR: [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
+; IR: br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR: loadstoreloop:
; IR: [[LOADPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64
; IR-NEXT: [[VAL:%[0-9]+]] = load i8, i8* [[LOADPTR]]
@@ -73,6 +75,8 @@ entry:
; IR-LABEL: @memset_caller
; IR: [[VAL:%[0-9]+]] = trunc i32 %c to i8
+; IR: [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
+; IR: br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR: loadstoreloop:
; IR: [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]
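
The two added `IR:` checks in each function assert that the lowering now guards the byte loop against a zero-length copy, branching straight to %split when %n is 0. A condensed hand-written sketch of the guarded structure the assertions describe (block names taken from the checks, everything else assumed):

define i8* @memcpy_sketch(i8* %dst, i8* %src, i64 %n) {
entry:
  %skip = icmp eq i64 0, %n            ; the new zero-length guard
  br i1 %skip, label %split, label %loadstoreloop

loadstoreloop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loadstoreloop ]
  %srcp = getelementptr inbounds i8, i8* %src, i64 %i
  %val = load i8, i8* %srcp
  %dstp = getelementptr inbounds i8, i8* %dst, i64 %i
  store i8 %val, i8* %dstp
  %i.next = add i64 %i, 1
  %again = icmp ult i64 %i.next, %n
  br i1 %again, label %loadstoreloop, label %split

split:
  ret i8* %dst
}
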
diff --git a/test/CodeGen/PowerPC/anon_aggr.ll b/test/CodeGen/PowerPC/anon_aggr.ll
index 9b32a8f55f34c..2c17358444771 100644
--- a/test/CodeGen/PowerPC/anon_aggr.ll
+++ b/test/CodeGen/PowerPC/anon_aggr.ll
@@ -1,6 +1,6 @@
; RUN: llc -verify-machineinstrs -O0 -mcpu=ppc64 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -O0 -mcpu=g4 -mtriple=powerpc-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN32 %s
-; RUN: llc -verify-machineinstrs -O0 -mcpu=ppc970 -mtriple=powerpc64-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN64 %s
+; RUN: llc -verify-machineinstrs -O0 -mcpu=970 -mtriple=powerpc64-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN64 %s
; Test case for PR 14779: anonymous aggregates are not handled correctly.
; Darwin bug report PR 15821 is similar.
@@ -22,7 +22,7 @@ unequal:
; CHECK-LABEL: func1:
; CHECK: cmpld {{([0-9]+,)?}}4, 5
-; CHECK-DAG: std 4, -[[OFFSET1:[0-9]+]]
+; CHECK-DAG: std 3, -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 5, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
@@ -31,19 +31,19 @@ unequal:
; DARWIN32: mr
; DARWIN32: mr r[[REG1:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN32: mr r[[REG2:[0-9]+]], r[[REGB:[0-9]+]]
-; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGA]], r[[REGB]]
+; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGB]], r[[REGA]]
; DARWIN32: stw r[[REG1]], -[[OFFSET1:[0-9]+]]
; DARWIN32: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
; DARWIN32: lwz r3, -[[OFFSET2]]
+; DARWIN32: lwz r3, -[[OFFSET1]]
; DARWIN64: _func1:
; DARWIN64: mr
; DARWIN64: mr r[[REG1:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: mr r[[REG2:[0-9]+]], r[[REGB:[0-9]+]]
-; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REGB]]
-; DARWIN64: std r[[REG1]], -[[OFFSET1:[0-9]+]]
-; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGB]], r[[REGA]]
+; DARWIN64: std r[[REG1]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG2]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
@@ -61,19 +61,19 @@ unequal:
ret i8* %array2_ptr
}
; CHECK-LABEL: func2:
-; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK-DAG: cmpld {{([0-9]+,)?}}4, 6
; CHECK-DAG: std 6, 72(1)
; CHECK-DAG: std 5, 64(1)
; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]]
-; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
+; CHECK-DAG: std 5, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
; DARWIN32-LABEL: _func2
-; DARWIN32-DAG: addi r[[REG8:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32-DAG: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32: mr
+; DARWIN32: addi r[[REG8:[0-9]+]], r[[REGSP:[0-9]+]], 36
; DARWIN32: mr r[[REG7:[0-9]+]], r5
+; DARWIN32: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32-DAG: cmplw {{(cr[0-9]+,)?}}r5, r[[REG2]]
; DARWIN32-DAG: stw r[[REG7]], -[[OFFSET1:[0-9]+]]
; DARWIN32-DAG: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
@@ -82,9 +82,9 @@ unequal:
; DARWIN64: _func2:
-; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
+; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
@@ -107,9 +107,9 @@ unequal:
}
; CHECK-LABEL: func3:
-; CHECK: cmpld {{([0-9]+,)?}}4, 6
-; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]](1)
-; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]](1)
+; CHECK-DAG: cmpld {{([0-9]+,)?}}3, 4
+; CHECK-DAG: std 3, -[[OFFSET2:[0-9]+]](1)
+; CHECK-DAG: std 4, -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
@@ -127,13 +127,13 @@ unequal:
; DARWIN32-DAG: lwz r3, -[[OFFSET2:[0-9]+]]
; DARWIN64: _func3:
-; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
-; DARWIN64: ld r[[REG4:[0-9]+]], 56(r1)
+; DARWIN64-DAG: ld r[[REG3:[0-9]+]], 72(r1)
+; DARWIN64-DAG: ld r[[REG4:[0-9]+]], 56(r1)
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN64: std r[[REG4]], -[[OFFSET2:[0-9]+]]
-; DARWIN64: ld r3, -[[OFFSET2]]
+; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
+; DARWIN64: std r[[REG3]], -[[OFFSET2:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
+; DARWIN64: ld r3, -[[OFFSET2]]
define i8* @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4,
@@ -152,31 +152,31 @@ unequal:
}
; CHECK-LABEL: func4:
-; CHECK: ld [[REG3:[0-9]+]], 136(1)
-; CHECK: ld [[REG2:[0-9]+]], 120(1)
-; CHECK: cmpld {{([0-9]+,)?}}[[REG2]], [[REG3]]
-; CHECK: std [[REG3]], -[[OFFSET2:[0-9]+]](1)
+; CHECK-DAG: ld [[REG2:[0-9]+]], 120(1)
+; CHECK-DAG: ld [[REG3:[0-9]+]], 136(1)
+; CHECK-DAG: cmpld {{([0-9]+,)?}}[[REG2]], [[REG3]]
; CHECK: std [[REG2]], -[[OFFSET1:[0-9]+]](1)
+; CHECK: std [[REG3]], -[[OFFSET2:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; DARWIN32: _func4:
; DARWIN32: lwz r[[REG4:[0-9]+]], 96(r1)
; DARWIN32: addi r[[REG1:[0-9]+]], r1, 100
-; DARWIN32: lwz r[[REG3:[0-9]+]], 108(r1)
; DARWIN32: mr r[[REG2:[0-9]+]], r[[REG4]]
+; DARWIN32: lwz r[[REG3:[0-9]+]], 108(r1)
; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN32: stw r[[REG2]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r[[REG1]], -[[OFFSET1]]
-; DARWIN32: lwz r[[REG1]], -[[OFFSET2]]
+; DARWIN32-DAG: stw r[[REG2]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG3]], -[[OFFSET2:[0-9]+]]
+; DARWIN32: lwz r3, -[[OFFSET1]]
+; DARWIN32: lwz r3, -[[OFFSET2]]
; DARWIN64: _func4:
; DARWIN64: ld r[[REG2:[0-9]+]], 120(r1)
-; DARWIN64: ld r[[REG3:[0-9]+]], 136(r1)
-; DARWIN64: mr r[[REG4:[0-9]+]], r[[REG2]]
+; DARWIN64-DAG: ld r[[REG3:[0-9]+]], 136(r1)
+; DARWIN64-DAG: mr r[[REG4:[0-9]+]], r[[REG2]]
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REG2]], r[[REG3]]
-; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG3]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
diff --git a/test/CodeGen/PowerPC/floatPSA.ll b/test/CodeGen/PowerPC/floatPSA.ll
index ccda9d56a147c..73dea19adbd5e 100644
--- a/test/CodeGen/PowerPC/floatPSA.ll
+++ b/test/CodeGen/PowerPC/floatPSA.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -O0 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -O2 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
; This verifies that single-precision floating point values that can't
; be passed in registers are stored in the rightmost word of the parameter
diff --git a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
index d398dfe7fc922..059665adc351f 100644
--- a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
+++ b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
@@ -100,28 +100,26 @@ define signext i32 @zeroEqualityTest04() {
; CHECK-NEXT: addi 5, 4, .LzeroEqualityTest02.buffer2@toc@l
; CHECK-NEXT: ldbrx 3, 0, 6
; CHECK-NEXT: ldbrx 4, 0, 5
-; CHECK-NEXT: subf. 7, 4, 3
+; CHECK-NEXT: cmpld 3, 4
; CHECK-NEXT: bne 0, .LBB3_2
; CHECK-NEXT: # BB#1: # %loadbb1
; CHECK-NEXT: li 4, 8
; CHECK-NEXT: ldbrx 3, 6, 4
; CHECK-NEXT: ldbrx 4, 5, 4
-; CHECK-NEXT: subf. 5, 4, 3
-; CHECK-NEXT: beq 0, .LBB3_4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: beq 0, .LBB3_3
; CHECK-NEXT: .LBB3_2: # %res_block
; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: li 11, 1
; CHECK-NEXT: li 12, -1
-; CHECK-NEXT: isel 3, 12, 3, 0
+; CHECK-NEXT: isel 5, 12, 11, 0
; CHECK-NEXT: .LBB3_3: # %endblock
-; CHECK-NEXT: cmpwi 3, 1
+; CHECK-NEXT: cmpwi 5, 1
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: isel 3, 4, 3, 0
; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB3_4:
-; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: b .LBB3_3
%call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer2 to i8*), i64 16)
%not.cmp = icmp slt i32 %call, 1
%. = zext i1 %not.cmp to i32
@@ -138,27 +136,25 @@ define signext i32 @zeroEqualityTest05() {
; CHECK-NEXT: addi 5, 4, .LzeroEqualityTest03.buffer2@toc@l
; CHECK-NEXT: ldbrx 3, 0, 6
; CHECK-NEXT: ldbrx 4, 0, 5
-; CHECK-NEXT: subf. 7, 4, 3
+; CHECK-NEXT: cmpld 3, 4
; CHECK-NEXT: bne 0, .LBB4_2
; CHECK-NEXT: # BB#1: # %loadbb1
; CHECK-NEXT: li 4, 8
; CHECK-NEXT: ldbrx 3, 6, 4
; CHECK-NEXT: ldbrx 4, 5, 4
-; CHECK-NEXT: subf. 5, 4, 3
-; CHECK-NEXT: beq 0, .LBB4_4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: beq 0, .LBB4_3
; CHECK-NEXT: .LBB4_2: # %res_block
; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: li 11, 1
; CHECK-NEXT: li 12, -1
-; CHECK-NEXT: isel 3, 12, 3, 0
+; CHECK-NEXT: isel 5, 12, 11, 0
; CHECK-NEXT: .LBB4_3: # %endblock
-; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: srwi 3, 5, 31
; CHECK-NEXT: xori 3, 3, 1
; CHECK-NEXT: clrldi 3, 3, 32
; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB4_4:
-; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: b .LBB4_3
%call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer2 to i8*), i64 16)
%call.lobit = lshr i32 %call, 31
%call.lobit.not = xor i32 %call.lobit, 1
diff --git a/test/CodeGen/PowerPC/memcmp.ll b/test/CodeGen/PowerPC/memcmp.ll
index bae713cb2072c..fbaaa8bb74c98 100644
--- a/test/CodeGen/PowerPC/memcmp.ll
+++ b/test/CodeGen/PowerPC/memcmp.ll
@@ -1,87 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-gnu-linux < %s | FileCheck %s -check-prefix=CHECK
-; Check size 8
-; Function Attrs: nounwind readonly
-define signext i32 @test1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 8) #2
+define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp8:
+; CHECK: # BB#0:
+; CHECK-NEXT: ldbrx 3, 0, 3
+; CHECK-NEXT: ldbrx 4, 0, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 8)
ret i32 %call
-
-; CHECK-LABEL: @test1
-; CHECK: ldbrx [[LOAD1:[0-9]+]]
-; CHECK-NEXT: ldbrx [[LOAD2:[0-9]+]]
-; CHECK-NEXT: li [[LI:[0-9]+]], 1
-; CHECK-NEXT: cmpld [[CMPLD:[0-9]+]], [[LOAD1]], [[LOAD2]]
-; CHECK-NEXT: subf. [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: li [[LI2:[0-9]+]], -1
-; CHECK-NEXT: isel [[ISEL:[0-9]+]], [[LI2]], [[LI]], 4
-; CHECK-NEXT: isel [[ISEL2:[0-9]+]], 0, [[ISEL]], 2
-; CHECK-NEXT: extsw 3, [[ISEL2]]
-; CHECK-NEXT: blr
}
-; Check size 4
-; Function Attrs: nounwind readonly
-define signext i32 @test2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 4) #2
+define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp4:
+; CHECK: # BB#0:
+; CHECK-NEXT: lwbrx 3, 0, 3
+; CHECK-NEXT: lwbrx 4, 0, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 4)
ret i32 %call
-
-; CHECK-LABEL: @test2
-; CHECK: lwbrx [[LOAD1:[0-9]+]]
-; CHECK-NEXT: lwbrx [[LOAD2:[0-9]+]]
-; CHECK-NEXT: li [[LI:[0-9]+]], 1
-; CHECK-NEXT: cmpld [[CMPLD:[0-9]+]], [[LOAD1]], [[LOAD2]]
-; CHECK-NEXT: subf. [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: li [[LI2:[0-9]+]], -1
-; CHECK-NEXT: isel [[ISEL:[0-9]+]], [[LI2]], [[LI]], 4
-; CHECK-NEXT: isel [[ISEL2:[0-9]+]], 0, [[ISEL]], 2
-; CHECK-NEXT: extsw 3, [[ISEL2]]
-; CHECK-NEXT: blr
}
-; Check size 2
-; Function Attrs: nounwind readonly
-define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 2) #2
+define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp2:
+; CHECK: # BB#0:
+; CHECK-NEXT: lhbrx 3, 0, 3
+; CHECK-NEXT: lhbrx 4, 0, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 2)
ret i32 %call
-
-; CHECK-LABEL: @test3
-; CHECK: lhbrx [[LOAD1:[0-9]+]]
-; CHECK-NEXT: lhbrx [[LOAD2:[0-9]+]]
-; CHECK-NEXT: li [[LI:[0-9]+]], 1
-; CHECK-NEXT: cmpld [[CMPLD:[0-9]+]], [[LOAD1]], [[LOAD2]]
-; CHECK-NEXT: subf. [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: li [[LI2:[0-9]+]], -1
-; CHECK-NEXT: isel [[ISEL:[0-9]+]], [[LI2]], [[LI]], 4
-; CHECK-NEXT: isel [[ISEL2:[0-9]+]], 0, [[ISEL]], 2
-; CHECK-NEXT: extsw 3, [[ISEL2]]
-; CHECK-NEXT: blr
}
-; Check size 1
-; Function Attrs: nounwind readonly
-define signext i32 @test4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 1) #2
+define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp1:
+; CHECK: # BB#0:
+; CHECK-NEXT: lbz 3, 0(3)
+; CHECK-NEXT: lbz 4, 0(4)
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 1) #2
ret i32 %call
-
-; CHECK-LABEL: @test4
-; CHECK: lbz [[LOAD1:[0-9]+]]
-; CHECK-NEXT: lbz [[LOAD2:[0-9]+]]
-; CHECK-NEXT: subf [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: extsw 3, [[SUB]]
-; CHECK-NEXT: blr
}
-; Function Attrs: nounwind readonly
-declare signext i32 @memcmp(i8*, i8*, i64) #1
+declare signext i32 @memcmp(i8*, i8*, i64)
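
The regenerated bodies compute memcmp's result branchlessly: after the byte-reversed loads, the first `isel` picks -1 or 1 from the unsigned compare and the second forces 0 on equality. The same selection logic written out in IR, over values assumed to be already loaded and byte-swapped (function name assumed):

define i32 @memcmp_result_sketch(i64 %a, i64 %b) {
entry:
  %lt = icmp ult i64 %a, %b
  %sign = select i1 %lt, i32 -1, i32 1    ; first isel: -1 if below, else 1
  %eq = icmp eq i64 %a, %b
  %res = select i1 %eq, i32 0, i32 %sign  ; second isel: 0 on equality
  ret i32 %res
}
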
diff --git a/test/CodeGen/PowerPC/memcmpIR.ll b/test/CodeGen/PowerPC/memcmpIR.ll
index f052cc258df8d..55f48ad19a636 100644
--- a/test/CodeGen/PowerPC/memcmpIR.ll
+++ b/test/CodeGen/PowerPC/memcmpIR.ll
@@ -3,48 +3,47 @@
define signext i32 @test1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
entry:
+ ; CHECK-LABEL: @test1(
; CHECK: [[LOAD1:%[0-9]+]] = load i64, i64*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-LABEL: res_block:{{.*}}
; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
; CHECK-NEXT: br label %endblock
+ ; CHECK-LABEL: loadbb1:{{.*}}
; CHECK: [[GEP1:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-NEXT: [[GEP2:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[GEP1]]
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[GEP2]]
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
-
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
+ ; CHECK-BE-LABEL: @test1(
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, i64*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-BE-LABEL: res_block:{{.*}}
; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
; CHECK-BE-NEXT: br label %endblock
+ ; CHECK-BE-LABEL: loadbb1:{{.*}}
; CHECK-BE: [[GEP1:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-BE-NEXT: [[GEP2:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[GEP1]]
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[GEP2]]
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
%0 = bitcast i32* %buffer1 to i8*
%1 = bitcast i32* %buffer2 to i8*
@@ -55,33 +54,25 @@ entry:
declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1
define signext i32 @test2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+ ; CHECK-LABEL: @test2(
; CHECK: [[LOAD1:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD2]])
- ; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[BSWAP1]] to i64
- ; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[BSWAP2]] to i64
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
-
- ; CHECK-LABEL: res_block:{{.*}}
- ; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
- ; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
- ; CHECK-NEXT: br label %endblock
+ ; CHECK-NEXT: [[CMP1:%[0-9]+]] = icmp ne i32 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: [[CMP2:%[0-9]+]] = icmp ult i32 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]] = select i1 [[CMP2]], i32 -1, i32 1
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]] = select i1 [[CMP1]], i32 [[SELECT1]], i32 0
+ ; CHECK-NEXT: ret i32 [[SELECT2]]
+ ; CHECK-BE-LABEL: @test2(
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i32, i32*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
- ; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[LOAD1]] to i64
- ; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[LOAD2]] to i64
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
-
- ; CHECK-BE-LABEL: res_block:{{.*}}
- ; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
- ; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
- ; CHECK-BE-NEXT: br label %endblock
+ ; CHECK-BE-NEXT: [[CMP1:%[0-9]+]] = icmp ne i32 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: [[CMP2:%[0-9]+]] = icmp ult i32 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: [[SELECT1:%[0-9]+]] = select i1 [[CMP2]], i32 -1, i32 1
+ ; CHECK-BE-NEXT: [[SELECT2:%[0-9]+]] = select i1 [[CMP1]], i32 [[SELECT1]], i32 0
+ ; CHECK-BE-NEXT: ret i32 [[SELECT2]]
entry:
%0 = bitcast i32* %buffer1 to i8*
@@ -95,35 +86,35 @@ define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture reado
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-LABEL: res_block:{{.*}}
; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
; CHECK-NEXT: br label %endblock
+ ; CHECK-LABEL: loadbb1:{{.*}}
; CHECK: [[LOAD1:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD2]])
; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[BSWAP1]] to i64
; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[BSWAP2]] to i64
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb2, label %res_block
+ ; CHECK-LABEL: loadbb2:{{.*}}
; CHECK: [[LOAD1:%[0-9]+]] = load i16, i16*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i16, i16*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i16 @llvm.bswap.i16(i16 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i16 @llvm.bswap.i16(i16 [[LOAD2]])
; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i16 [[BSWAP1]] to i64
; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i16 [[BSWAP2]] to i64
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb3, label %res_block
+ ; CHECK-LABEL: loadbb3:{{.*}}
; CHECK: [[LOAD1:%[0-9]+]] = load i8, i8*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i8, i8*
; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i8 [[LOAD1]] to i32
@@ -133,9 +124,8 @@ define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture reado
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, i64*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-BE-LABEL: res_block:{{.*}}
; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
@@ -146,17 +136,15 @@ define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture reado
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[LOAD1]] to i64
; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[LOAD2]] to i64
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb2, label %res_block
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i16, i16*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i16, i16*
; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i16 [[LOAD1]] to i64
; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i16 [[LOAD2]] to i64
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb3, label %res_block
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i8, i8*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i8, i8*
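
Read together, the rewritten checks describe the new memcmp expansion shape: each loadbb compares one chunk with `icmp eq` and falls through to the next loadbb on equality, res_block turns the first mismatch into -1 or 1 via `icmp ult`, and endblock merges the outcomes. A condensed two-chunk sketch (block and value names assumed; byte swaps omitted):

define i32 @memcmp_expansion_sketch(i64* %p, i64* %q) {
loadbb:
  %a0 = load i64, i64* %p
  %b0 = load i64, i64* %q
  %eq0 = icmp eq i64 %a0, %b0
  br i1 %eq0, label %loadbb1, label %res_block

loadbb1:
  %pa1 = getelementptr i64, i64* %p, i64 1
  %pb1 = getelementptr i64, i64* %q, i64 1
  %a1 = load i64, i64* %pa1
  %b1 = load i64, i64* %pb1
  %eq1 = icmp eq i64 %a1, %b1
  br i1 %eq1, label %endblock, label %res_block

res_block:
  %a = phi i64 [ %a0, %loadbb ], [ %a1, %loadbb1 ]
  %b = phi i64 [ %b0, %loadbb ], [ %b1, %loadbb1 ]
  %lt = icmp ult i64 %a, %b
  %sel = select i1 %lt, i32 -1, i32 1
  br label %endblock

endblock:
  %res = phi i32 [ 0, %loadbb1 ], [ %sel, %res_block ]
  ret i32 %res
}
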
diff --git a/test/CodeGen/PowerPC/merge_stores_dereferenceable.ll b/test/CodeGen/PowerPC/merge_stores_dereferenceable.ll
new file mode 100644
index 0000000000000..29aee7a3825f3
--- /dev/null
+++ b/test/CodeGen/PowerPC/merge_stores_dereferenceable.ll
@@ -0,0 +1,24 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+; This code causes an assertion failure if the dereferenceable flag is not properly set when merging consecutive stores.
+; CHECK-LABEL: func:
+; CHECK: lxvd2x [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOT: lxvd2x
+; CHECK: stxvd2x [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+
+define <2 x i64> @func(i64* %pdst) {
+entry:
+ %a = alloca [4 x i64], align 8
+ %psrc0 = bitcast [4 x i64]* %a to i64*
+ %psrc1 = getelementptr inbounds i64, i64* %psrc0, i64 1
+ %d0 = load i64, i64* %psrc0
+ %d1 = load i64, i64* %psrc1
+ %pdst0 = getelementptr inbounds i64, i64* %pdst, i64 0
+ %pdst1 = getelementptr inbounds i64, i64* %pdst, i64 1
+ store i64 %d0, i64* %pdst0, align 8
+ store i64 %d1, i64* %pdst1, align 8
+ %psrcd = bitcast [4 x i64]* %a to <2 x i64>*
+ %vec = load <2 x i64>, <2 x i64>* %psrcd
+ ret <2 x i64> %vec
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
index d59dc64dcf857..ba56dbaa83d0c 100644
--- a/test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -1,6 +1,6 @@
-; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=-vsx < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-VSX %s
-; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-P9 %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O2 -fast-isel=false -mattr=-vsx < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O2 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-VSX %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O2 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-P9 %s
; Verify internal alignment of long double in a struct. The double
; argument comes in in GPR3; GPR4 is skipped; GPRs 5 and 6 contain
@@ -44,9 +44,9 @@ entry:
; CHECK-VSX-DAG: std 3, 48(1)
; CHECK-VSX-DAG: std 5, -16(1)
; CHECK-VSX-DAG: std 6, -8(1)
-; CHECK-VSX: addi 3, 1, -16
-; CHECK-VSX: lxsdx 1, 0, 3
-; CHECK-VSX: addi 3, 1, -8
+; CHECK-VSX-DAG: addi [[REG1:[0-9]+]], 1, -16
+; CHECK-VSX-DAG: addi 3, 1, -8
+; CHECK-VSX: lxsdx 1, 0, [[REG1]]
; CHECK-VSX: lxsdx 2, 0, 3
; FIXME-VSX: addi 4, 1, 48
@@ -54,9 +54,9 @@ entry:
; FIXME-VSX: li 3, 24
; FIXME-VSX: lxsdx 2, 4, 3
-; CHECK-P9: std 6, 72(1)
-; CHECK-P9: std 5, 64(1)
-; CHECK-P9: std 4, 56(1)
-; CHECK-P9: std 3, 48(1)
-; CHECK-P9: mtvsrd 1, 5
-; CHECK-P9: mtvsrd 2, 6
+; CHECK-P9-DAG: std 6, 72(1)
+; CHECK-P9-DAG: std 5, 64(1)
+; CHECK-P9-DAG: std 4, 56(1)
+; CHECK-P9-DAG: std 3, 48(1)
+; CHECK-P9-DAG: mtvsrd 1, 5
+; CHECK-P9-DAG: mtvsrd 2, 6
diff --git a/test/CodeGen/PowerPC/tls.ll b/test/CodeGen/PowerPC/tls.ll
index 55df71b537617..63f498c1662c5 100644
--- a/test/CodeGen/PowerPC/tls.ll
+++ b/test/CodeGen/PowerPC/tls.ll
@@ -11,8 +11,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @localexec() nounwind {
entry:
;OPT0: addis [[REG1:[0-9]+]], 13, a@tprel@ha
-;OPT0-NEXT: li [[REG2:[0-9]+]], 42
;OPT0-NEXT: addi [[REG1]], [[REG1]], a@tprel@l
+;OPT0-NEXT: li [[REG2:[0-9]+]], 42
;OPT0: stw [[REG2]], 0([[REG1]])
;OPT1: addis [[REG1:[0-9]+]], 13, a@tprel@ha
;OPT1-NEXT: li [[REG2:[0-9]+]], 42
diff --git a/test/CodeGen/PowerPC/tls_get_addr_fence1.mir b/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
new file mode 100644
index 0000000000000..fa8e73e321dda
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
@@ -0,0 +1,66 @@
+# ADJCALLSTACKDOWN and ADJCALLSTACKUP must be generated around TLS pseudo code as a scheduling fence (PR25839).
+# RUN: llc -mtriple=powerpc64le-linux-gnu -run-pass=ppc-tls-dynamic-call -verify-machineinstrs -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-i64:64-n32:64"
+ target triple = "powerpc64le-unknown-linux-gnu"
+
+ @tls_var = external thread_local local_unnamed_addr global i32
+
+ define i32 @tls_func() local_unnamed_addr {
+ entry:
+ %0 = load i32, i32* @tls_var
+ ret i32 %0
+ }
+
+...
+---
+name: tls_func
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 2, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x2
+ %0 = ADDIStlsgdHA %x2, @tls_var
+ %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7
+ %2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var)
+ %x3 = COPY %2
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+ ; CHECK-LABEL: bb.0.entry
+ ; CHECK: %[[reg1:[0-9]+]] = ADDIStlsgdHA %x2, @tls_var
+ ; CHECK: ADJCALLSTACKDOWN 0, 0
+ ; CHECK: %x3 = ADDItlsgdL %[[reg1]], @tls_var
+ ; CHECK: %x3 = GETtlsADDR %x3, @tls_var
+ ; CHECK: ADJCALLSTACKUP 0, 0
+ ; CHECK: BLR8
+...
diff --git a/test/CodeGen/PowerPC/tls_get_addr_fence2.mir b/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
new file mode 100644
index 0000000000000..2bb88147fcf49
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
@@ -0,0 +1,65 @@
+# ADJCALLSTACKDOWN and ADJCALLSTACKUP should not be generated around TLS pseudo code if it is located within an existing ADJCALLSTACKDOWN/ADJCALLSTACKUP pair.
+# RUN: llc -mtriple=powerpc64le-linux-gnu -run-pass=ppc-tls-dynamic-call -verify-machineinstrs -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-i64:64-n32:64"
+ target triple = "powerpc64le-unknown-linux-gnu"
+
+ @tls_var = external thread_local local_unnamed_addr global i32
+
+ define i32 @tls_func() local_unnamed_addr {
+ entry:
+ %0 = load i32, i32* @tls_var
+ ret i32 %0
+ }
+
+...
+---
+name: tls_func
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 2, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x2
+ ADJCALLSTACKDOWN 32, 0, implicit-def %r1, implicit %r1
+ %0 = ADDIStlsgdHA %x2, @tls_var
+ %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7
+ %2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var)
+ %x3 = COPY %2
+ ADJCALLSTACKUP 32, 0, implicit-def %r1, implicit %r1
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+ ; CHECK-LABEL: bb.0.entry
+ ; CHECK-NOT: ADJCALLSTACKDOWN 0, 0
+ ; CHECK-NOT: ADJCALLSTACKUP 0, 0
+ ; CHECK: BLR8
+...
diff --git a/test/CodeGen/Thumb/long-setcc.ll b/test/CodeGen/Thumb/long-setcc.ll
index 3460edb96f0d7..7db06d0ae35e8 100644
--- a/test/CodeGen/Thumb/long-setcc.ll
+++ b/test/CodeGen/Thumb/long-setcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi < %s | FileCheck %s
define i1 @t1(i64 %x) {
%B = icmp slt i64 %x, 0
diff --git a/test/CodeGen/Thumb2/constant-islands-new-island.ll b/test/CodeGen/Thumb2/constant-islands-new-island.ll
index 8ed657ef1f2a7..de7b0cce3792d 100644
--- a/test/CodeGen/Thumb2/constant-islands-new-island.ll
+++ b/test/CodeGen/Thumb2/constant-islands-new-island.ll
@@ -1,25 +1,25 @@
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabihf %s -o - | FileCheck %s
-; Check that new water is created by splitting the basic block right after the
+; Check that new water is created by splitting the basic block after the
; load instruction. Previously, new water was created before the load
; instruction, which caused the pass to fail to converge.
define void @test(i1 %tst) {
; CHECK-LABEL: test:
; CHECK: vldr {{s[0-9]+}}, [[CONST:\.LCPI[0-9]+_[0-9]+]]
-; CHECK-NEXT: b.w [[CONTINUE:\.LBB[0-9]+_[0-9]+]]
+; CHECK: b.w [[CONTINUE:\.LBB[0-9]+_[0-9]+]]
; CHECK: [[CONST]]:
; CHECK-NEXT: .long
; CHECK: [[CONTINUE]]:
entry:
- call i32 @llvm.arm.space(i32 2000, i32 undef)
br i1 %tst, label %true, label %false
true:
%val = phi float [12345.0, %entry], [undef, %false]
+ call i32 @llvm.arm.space(i32 2000, i32 undef)
call void @bar(float %val)
ret void
diff --git a/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll b/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
new file mode 100644
index 0000000000000..9fcc0f5d617b0
--- /dev/null
+++ b/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
@@ -0,0 +1,154 @@
+; RUN: llc < %s -mtriple=thumbv7m -mcpu=cortex-m7 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BP
+; RUN: llc < %s -mtriple=thumbv7m -mcpu=cortex-m3 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOBP
+
+declare void @otherfn()
+
+; CHECK-LABEL: triangle1:
+; CHECK: itt ne
+; CHECK: movne
+; CHECK: strne
+define i32 @triangle1(i32 %n, i32* %p) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ br label %if.end
+
+if.end:
+ tail call void @otherfn()
+ ret i32 0
+}
+
+; CHECK-LABEL: triangle2:
+; CHECK-BP: itttt ne
+; CHECK-BP: movne
+; CHECK-BP: strne
+; CHECK-BP: movne
+; CHECK-BP: strne
+; CHECK-NOBP: cbz
+; CHECK-NOBP: movs
+; CHECK-NOBP: str
+; CHECK-NOBP: movs
+; CHECK-NOBP: str
+define i32 @triangle2(i32 %n, i32* %p, i32* %q) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ store i32 2, i32* %q, align 4
+ br label %if.end
+
+if.end:
+ tail call void @otherfn()
+ ret i32 0
+}
+
+; CHECK-LABEL: triangle3:
+; CHECK: cbz
+; CHECK: movs
+; CHECK: str
+; CHECK: movs
+; CHECK: str
+; CHECK: movs
+; CHECK: str
+define i32 @triangle3(i32 %n, i32* %p, i32* %q, i32* %r) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ store i32 2, i32* %q, align 4
+ store i32 3, i32* %r, align 4
+ br label %if.end
+
+if.end:
+ tail call void @otherfn()
+ ret i32 0
+}
+
+; CHECK-LABEL: diamond1:
+; CHECK: ite eq
+; CHECK: ldreq
+; CHECK: strne
+define i32 @diamond1(i32 %n, i32* %p) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ store i32 %n, i32* %p, align 4
+ br label %if.end
+
+if.else:
+ %0 = load i32, i32* %p, align 4
+ br label %if.end
+
+if.end:
+ %n.addr.0 = phi i32 [ %n, %if.then ], [ %0, %if.else ]
+ tail call void @otherfn()
+ ret i32 %n.addr.0
+}
+
+; CHECK-LABEL: diamond2:
+; CHECK-BP: itte
+; CHECK-BP: streq
+; CHECK-BP: ldreq
+; CHECK-BP: strne
+; CHECK-NOBP: cbz
+; CHECK-NOBP: str
+; CHECK-NOBP: b
+; CHECK-NOBP: str
+; CHECK-NOBP: ldr
+define i32 @diamond2(i32 %n, i32 %m, i32* %p, i32* %q) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ store i32 %n, i32* %p, align 4
+ br label %if.end
+
+if.else:
+ store i32 %m, i32* %q, align 4
+ %0 = load i32, i32* %p, align 4
+ br label %if.end
+
+if.end:
+ %n.addr.0 = phi i32 [ %n, %if.then ], [ %0, %if.else ]
+ tail call void @otherfn()
+ ret i32 %n.addr.0
+}
+
+; CHECK-LABEL: diamond3:
+; CHECK: cbz
+; CHECK: movs
+; CHECK: str
+; CHECK: b
+; CHECK: ldr
+; CHECK: ldr
+; CHECK: adds
+define i32 @diamond3(i32 %n, i32* %p, i32* %q) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ br label %if.end
+
+if.else:
+ %0 = load i32, i32* %p, align 4
+ %1 = load i32, i32* %q, align 4
+ %add = add nsw i32 %1, %0
+ br label %if.end
+
+if.end:
+ %n.addr.0 = phi i32 [ %n, %if.then ], [ %add, %if.else ]
+ tail call void @otherfn()
+ ret i32 %n.addr.0
+}
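
These shapes mirror small C patterns: triangle1 is roughly "if (n) *p = 1;" and the diamonds are if/else with a value merged by a phi. The two RUN lines pin down the heuristic: on cortex-m7, which has a branch predictor, a mispredicted branch is expensive, so even the four-instruction triangle2 is predicated; on cortex-m3, which has none, a branch costs a small fixed penalty, so only the smallest shapes are worth if-converting. A hedged sketch of the smallest case and its lowering:

define void @triangle(i32 %n, i32* %p) {
entry:
  %c = icmp eq i32 %n, 0
  br i1 %c, label %end, label %then
then:
  store i32 1, i32* %p, align 4
  br label %end
end:
  ret void
}
; with predictor:    itt ne / movne / strne  (no branch left to mispredict)
; without predictor: still predicated at this size, but one more
;                    store/value pair tips the balance back to cbz,
;                    as triangle2 shows above.
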
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
index 4a76e100b6580..3c74dde111485 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
@@ -35,9 +35,6 @@ entry:
; CHECK: cmp
; CHECK: it eq
; CHECK: cmpeq
-; CHECK: itt eq
-; CHECK: moveq
-; CHECK: popeq
br label %tailrecurse
tailrecurse: ; preds = %bb, %entry
diff --git a/test/CodeGen/WebAssembly/exception.ll b/test/CodeGen/WebAssembly/exception.ll
new file mode 100644
index 0000000000000..eedb5c78b241f
--- /dev/null
+++ b/test/CodeGen/WebAssembly/exception.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+declare void @llvm.wasm.throw(i32, i8*)
+declare void @llvm.wasm.rethrow()
+
+; CHECK-LABEL: throw:
+; CHECK-NEXT: i32.const $push0=, 0
+; CHECK-NEXT: throw 0, $pop0
+define void @throw() {
+ call void @llvm.wasm.throw(i32 0, i8* null)
+ ret void
+}
+
+; CHECK-LABEL: rethrow:
+; CHECK-NEXT: rethrow 0
+define void @rethrow() {
+ call void @llvm.wasm.rethrow()
+ ret void
+}
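
The first operand of @llvm.wasm.throw selects the exception tag (0 here) and the second is the pointer thrown; the test only exercises the null case. A hedged usage sketch with a real object (the global and its name are illustrative, not part of the test):

target triple = "wasm32-unknown-unknown-wasm"

declare void @llvm.wasm.throw(i32, i8*)

@exn = global i32 42

define void @throw_value() {
  %p = bitcast i32* @exn to i8*
  call void @llvm.wasm.throw(i32 0, i8* %p)
  ret void
}
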
diff --git a/test/CodeGen/X86/GlobalISel/and-scalar.ll b/test/CodeGen/X86/GlobalISel/and-scalar.ll
new file mode 100644
index 0000000000000..b193214210877
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/and-scalar.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_and_i8:
+; ALL: # BB#0:
+; ALL-NEXT: andb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = and i8 %arg1, %arg2
+ ret i8 %ret
+}
+
+define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_and_i16:
+; ALL: # BB#0:
+; ALL-NEXT: andw %di, %si
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = and i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_and_i32:
+; ALL: # BB#0:
+; ALL-NEXT: andl %edi, %esi
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = and i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_and_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_and_i64:
+; ALL: # BB#0:
+; ALL-NEXT: andq %rdi, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ %ret = and i64 %arg1, %arg2
+ ret i64 %ret
+}
+
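
The code quality here is what early GlobalISel produces without the usual DAG combines: the i8/i16 cases operate on the low parts of the argument registers and still return through a full movl of %esi, which is correct because the ABI only defines the low bits of the return register. For comparison, a hedged sketch of what DAG ISel emits for the i8 case (exact register choice may differ):

; test_and_i8 via SelectionDAG, roughly:
;   movl %esi, %eax
;   andb %dil, %al
;   retq
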
diff --git a/test/CodeGen/X86/GlobalISel/fadd-scalar.ll b/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
new file mode 100644
index 0000000000000..6aee06a75f6a4
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fadd_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fadd_float:
+; ALL: # BB#0:
+; ALL-NEXT: addss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fadd_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fadd_double:
+; ALL: # BB#0:
+; ALL-NEXT: addsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll b/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
new file mode 100644
index 0000000000000..268802dc06aac
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fdiv_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fdiv_float:
+; ALL: # BB#0:
+; ALL-NEXT: divss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fdiv float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fdiv_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fdiv_double:
+; ALL: # BB#0:
+; ALL-NEXT: divsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fdiv double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fmul-scalar.ll b/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
new file mode 100644
index 0000000000000..c7a37a14c33ca
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fmul_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fmul_float:
+; ALL: # BB#0:
+; ALL-NEXT: mulss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fmul float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fmul_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fmul_double:
+; ALL: # BB#0:
+; ALL-NEXT: mulsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fmul double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fsub-scalar.ll b/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
new file mode 100644
index 0000000000000..32c25a3a0822a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fsub_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fsub_float:
+; ALL: # BB#0:
+; ALL-NEXT: subss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fsub_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fsub_double:
+; ALL: # BB#0:
+; ALL-NEXT: subsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
new file mode 100644
index 0000000000000..b57db15d4646e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
@@ -0,0 +1,124 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i8 @test_and_i8() {
+ %ret = and i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_and_i16() {
+ %ret = and i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_and_i32() {
+ %ret = and i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_and_i64() {
+ %ret = and i64 undef, undef
+ ret i64 %ret
+ }
+
+...
+---
+name: test_and_i8
+# CHECK-LABEL: name: test_and_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s8) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_AND %0, %0
+# CHECK-NEXT: %al = COPY %1(s8)
+# CHECK-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_AND %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_and_i16
+# CHECK-LABEL: name: test_and_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s16) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_AND %0, %0
+# CHECK-NEXT: %ax = COPY %1(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_AND %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_and_i32
+# CHECK-LABEL: name: test_and_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_AND %0, %0
+# CHECK-NEXT: %eax = COPY %1(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_AND %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_and_i64
+# CHECK-LABEL: name: test_and_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_AND %0, %0
+# CHECK-NEXT: %rax = COPY %1(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_AND %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
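
These -run-pass=legalizer tests exercise one stage of the GlobalISel pipeline (IRTranslator, Legalizer, RegBankSelect, InstructionSelect) in isolation; since s8/s16/s32/s64 are all legal scalar types on x86-64, the G_AND is expected to survive unchanged. A hedged way to capture MIR at the same point from plain IR (the -stop-after spelling is assumed to match the pass name in the RUN line):

; and.ll
define i8 @test_and_i8(i8 %a, i8 %b) {
  %r = and i8 %a, %b
  ret i8 %r
}
; llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=legalizer and.ll -o and.mir
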
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
new file mode 100644
index 0000000000000..353a26ca2c8a5
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fadd_float(float %arg1, float %arg2) {
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fadd_double(double %arg1, double %arg2) {
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fadd_float
+# CHECK-LABEL: name: test_fadd_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FADD %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FADD %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fadd_double
+# CHECK-LABEL: name: test_fadd_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FADD %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FADD %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
new file mode 100644
index 0000000000000..102d95c6390cb
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fdiv_float(float %arg1, float %arg2) {
+ %ret = fdiv float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fdiv_double(double %arg1, double %arg2) {
+ %ret = fdiv double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fdiv_float
+# CHECK-LABEL: name: test_fdiv_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FDIV %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fdiv_double
+# CHECK-LABEL: name: test_fdiv_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FDIV %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
new file mode 100644
index 0000000000000..eeacbfcf07b26
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fmul_float(float %arg1, float %arg2) {
+ %ret = fmul float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fmul_double(double %arg1, double %arg2) {
+ %ret = fmul double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fmul_float
+# CHECK-LABEL: name: test_fmul_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FMUL %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fmul_double
+# CHECK-LABEL: name: test_fmul_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FMUL %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
new file mode 100644
index 0000000000000..3b3ee4aa0afbb
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fsub_float(float %arg1, float %arg2) {
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fsub_double(double %arg1, double %arg2) {
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fsub_float
+# CHECK-LABEL: name: test_fsub_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FSUB %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fsub_double
+# CHECK-LABEL: name: test_fsub_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FSUB %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
new file mode 100644
index 0000000000000..a014f56a35888
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
@@ -0,0 +1,124 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i8 @test_or_i8() {
+ %ret = or i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_or_i16() {
+ %ret = or i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_or_i32() {
+ %ret = or i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_or_i64() {
+ %ret = or i64 undef, undef
+ ret i64 %ret
+ }
+
+...
+---
+name: test_or_i8
+# CHECK-LABEL: name: test_or_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s8) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_OR %0, %0
+# CHECK-NEXT: %al = COPY %1(s8)
+# CHECK-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_OR %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_or_i16
+# CHECK-LABEL: name: test_or_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s16) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_OR %0, %0
+# CHECK-NEXT: %ax = COPY %1(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_OR %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_or_i32
+# CHECK-LABEL: name: test_or_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_OR %0, %0
+# CHECK-NEXT: %eax = COPY %1(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_OR %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_or_i64
+# CHECK-LABEL: name: test_or_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_OR %0, %0
+# CHECK-NEXT: %rax = COPY %1(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_OR %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
new file mode 100644
index 0000000000000..e2af91283026e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
@@ -0,0 +1,124 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i8 @test_xor_i8() {
+ %ret = xor i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_xor_i16() {
+ %ret = xor i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_xor_i32() {
+ %ret = xor i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_xor_i64() {
+ %ret = xor i64 undef, undef
+ ret i64 %ret
+ }
+
+...
+---
+name: test_xor_i8
+# CHECK-LABEL: name: test_xor_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s8) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_XOR %0, %0
+# CHECK-NEXT: %al = COPY %1(s8)
+# CHECK-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_XOR %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_xor_i16
+# CHECK-LABEL: name: test_xor_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s16) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_XOR %0, %0
+# CHECK-NEXT: %ax = COPY %1(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_XOR %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_xor_i32
+# CHECK-LABEL: name: test_xor_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_XOR %0, %0
+# CHECK-NEXT: %eax = COPY %1(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_XOR %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_xor_i64
+# CHECK-LABEL: name: test_xor_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_XOR %0, %0
+# CHECK-NEXT: %rax = COPY %1(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_XOR %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/or-scalar.ll b/test/CodeGen/X86/GlobalISel/or-scalar.ll
new file mode 100644
index 0000000000000..b0371457f76e5
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/or-scalar.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_or_i8:
+; ALL: # BB#0:
+; ALL-NEXT: orb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = or i8 %arg1, %arg2
+ ret i8 %ret
+}
+
+define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_or_i16:
+; ALL: # BB#0:
+; ALL-NEXT: orw %di, %si
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = or i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_or_i32:
+; ALL: # BB#0:
+; ALL-NEXT: orl %edi, %esi
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = or i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_or_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_or_i64:
+; ALL: # BB#0:
+; ALL-NEXT: orq %rdi, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ %ret = or i64 %arg1, %arg2
+ ret i64 %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index 7bcc57aef4ac5..3658bc9af957a 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -35,6 +35,25 @@
%ret = fadd double %arg1, %arg2
ret double %ret
}
+
+ define void @test_fsub_float() {
+ %ret1 = fsub float undef, undef
+ %ret2 = fsub double undef, undef
+ ret void
+ }
+
+ define void @test_fmul_float() {
+ %ret1 = fmul float undef, undef
+ %ret2 = fmul double undef, undef
+ ret void
+ }
+
+ define void @test_fdiv_float() {
+ %ret1 = fdiv float undef, undef
+ %ret2 = fdiv double undef, undef
+ ret void
+ }
+
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
%ret = add <4 x i32> %arg1, %arg2
@@ -135,6 +154,26 @@
ret i1 %r
}
+ define i8 @test_xor_i8() {
+ %ret = xor i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_or_i16() {
+ %ret = or i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_and_i32() {
+ %ret = and i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_and_i64() {
+ %ret = and i64 undef, undef
+ ret i64 %ret
+ }
+
...
---
name: test_add_i8
@@ -338,6 +377,105 @@ body: |
...
---
+name: test_fsub_float
+# CHECK-LABEL: name: test_fsub_float
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 4, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 5, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 6, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 7, class: vecr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %2(s64) = IMPLICIT_DEF
+ %1(s32) = G_FSUB %0, %0
+ %3(s64) = G_FSUB %2, %2
+ RET 0
+
+...
+---
+name: test_fmul_float
+# CHECK-LABEL: name: test_fmul_float
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 4, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 5, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 6, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 7, class: vecr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %2(s64) = IMPLICIT_DEF
+ %1(s32) = G_FMUL %0, %0
+ %3(s64) = G_FMUL %2, %2
+ RET 0
+
+...
+---
+name: test_fdiv_float
+# CHECK-LABEL: name: test_fdiv_float
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 4, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 5, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 6, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 7, class: vecr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %2(s64) = IMPLICIT_DEF
+ %1(s32) = G_FDIV %0, %0
+ %3(s64) = G_FDIV %2, %2
+ RET 0
+
+...
+---
name: test_add_v4i32
alignment: 4
legalized: true
@@ -850,3 +988,100 @@ body: |
RET 0, implicit %al
...
+---
+name: test_xor_i8
+# CHECK-LABEL: name: test_xor_i8
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_XOR %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_or_i16
+# CHECK-LABEL: name: test_or_i16
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_OR %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_and_i32
+# CHECK-LABEL: name: test_and_i32
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_AND %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_and_i64
+# CHECK-LABEL: name: test_and_i64
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_AND %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
+
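
RegBankSelect assigns only a bank, not a concrete register class: the scalar FP operations land in vecr because x86-64 keeps scalar floats in the XMM file, while the logical ops stay in gpr. The concrete class is chosen one stage later, which the select-*.mir tests below check. As a sketch on a single operand:

# before regbankselect:    - { id: 1, class: _, preferred-register: '' }
# after regbankselect:     - { id: 1, class: vecr, preferred-register: '' }
# after instruction-select: class becomes fr32 (or fr32x with AVX512F).
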
diff --git a/test/CodeGen/X86/GlobalISel/select-add.mir b/test/CodeGen/X86/GlobalISel/select-add.mir
index 78e6bb6913a41..45811c5cdc26a 100644
--- a/test/CodeGen/X86/GlobalISel/select-add.mir
+++ b/test/CodeGen/X86/GlobalISel/select-add.mir
@@ -24,16 +24,6 @@
ret i8 %ret
}
- define float @test_add_float(float %arg1, float %arg2) {
- %ret = fadd float %arg1, %arg2
- ret float %ret
- }
-
- define double @test_add_double(double %arg1, double %arg2) {
- %ret = fadd double %arg1, %arg2
- ret double %ret
- }
-
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
%ret = add <4 x i32> %arg1, %arg2
ret <4 x i32> %ret
@@ -157,76 +147,6 @@ body: |
...
---
-name: test_add_float
-# ALL-LABEL: name: test_add_float
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = ADDSSrr %0, %1
-# AVX-NEXT: %2 = VADDSSrr %0, %1
-# AVX512F-NEXT: %2 = VADDSSZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
- %2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
-
-...
----
-name: test_add_double
-# ALL-LABEL: name: test_add_double
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = ADDSDrr %0, %1
-# AVX-NEXT: %2 = VADDSDrr %0, %1
-# AVX512F-NEXT: %2 = VADDSDZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
- %2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
-
-...
----
name: test_add_v4i32
# ALL-LABEL: name: test_add_v4i32
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/select-and-scalar.mir b/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
new file mode 100644
index 0000000000000..c40cc224d50e8
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
@@ -0,0 +1,160 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
+ %ret = and i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
+ %ret = and i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
+ %ret = and i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_and_i64(i64 %arg1, i64 %arg2) {
+ %ret = and i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_and_i8
+# ALL-LABEL: name: test_and_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %sil
+# ALL-NEXT: %2 = AND8rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %2
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_AND %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_and_i16
+# ALL-LABEL: name: test_and_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = AND16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_AND %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_and_i32
+# ALL-LABEL: name: test_and_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = AND32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_AND %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_and_i64
+# ALL-LABEL: name: test_and_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = AND64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_AND %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
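
Worth noting: the pre-selection MIR copies s8/s16 values out of the full %edi/%esi registers, and it is instruction selection that constrains them to gr8/gr16 and rewrites the sources to the matching subregisters, as the ALL CHECK lines show. On one operand:

# before instruction-select:  %0(s8) = COPY %edi
# after instruction-select:   %0 = COPY %dil     (class gr8)
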
diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir
index 7902a5084ce6b..4b91b5f9f0982 100644
--- a/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -13,6 +13,10 @@
ret i32 4
}
+ define i32 @const_i32_0() {
+ ret i32 0
+ }
+
define i64 @const_i64() {
ret i64 68719476720
}
@@ -84,6 +88,23 @@ body: |
...
---
+name: const_i32_0
+# CHECK-LABEL: name: const_i32_0
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: %0 = MOV32r0 implicit-def %eflags
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = G_CONSTANT i32 0
+ %eax = COPY %0(s32)
+ RET 0, implicit %eax
+
+...
+---
name: const_i64
legalized: true
regBankSelected: true
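
The new const_i32_0 case pins down that a zero constant is selected as MOV32r0 rather than a move of immediate zero; MOV32r0 is expanded after register allocation into a self-xor, which is both shorter and dependency-breaking, and that clobber is why the CHECK line carries implicit-def %eflags. A minimal reproducer:

define i32 @const_i32_0() {
  ret i32 0
}
; expected machine code after expansion (hedged): xorl %eax, %eax ; retq
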
diff --git a/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
new file mode 100644
index 0000000000000..fa4c529982cc5
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fadd_float(float %arg1, float %arg2) {
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fadd_double(double %arg1, double %arg2) {
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fadd_float
+# ALL-LABEL: name: test_fadd_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VADDSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VADDSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FADD %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fadd_double
+# ALL-LABEL: name: test_fadd_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VADDSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VADDSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FADD %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
new file mode 100644
index 0000000000000..d2c1d15286526
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fdiv_float(float %arg1, float %arg2) {
+ %ret = fdiv float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fdiv_double(double %arg1, double %arg2) {
+ %ret = fdiv double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fdiv_float
+# ALL-LABEL: name: test_fdiv_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = DIVSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VDIVSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VDIVSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fdiv_double
+# ALL-LABEL: name: test_fdiv_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = DIVSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VDIVSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VDIVSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
new file mode 100644
index 0000000000000..98e5d303d7b16
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fmul_float(float %arg1, float %arg2) {
+ %ret = fmul float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fmul_double(double %arg1, double %arg2) {
+ %ret = fmul double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fmul_float
+# ALL-LABEL: name: test_fmul_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = MULSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VMULSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VMULSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fmul_double
+# ALL-LABEL: name: test_fmul_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = MULSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VMULSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VMULSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
new file mode 100644
index 0000000000000..9f58327d9bb67
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fsub_float(float %arg1, float %arg2) {
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fsub_double(double %arg1, double %arg2) {
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fsub_float
+# ALL-LABEL: name: test_fsub_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VSUBSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VSUBSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fsub_double
+# ALL-LABEL: name: test_fsub_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VSUBSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VSUBSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir b/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
new file mode 100644
index 0000000000000..8e31a904e3607
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
@@ -0,0 +1,52 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
+--- |
+ define void @test_merge() {
+ ret void
+ }
+...
+---
+name: test_merge
+# AVX-LABEL: name: test_merge
+#
+# AVX512VL-LABEL: name: test_merge
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX: registers:
+# AVX-NEXT: - { id: 0, class: vr128, preferred-register: '' }
+# AVX-NEXT: - { id: 1, class: vr256, preferred-register: '' }
+# AVX-NEXT: - { id: 2, class: vr256, preferred-register: '' }
+# AVX-NEXT: - { id: 3, class: vr256, preferred-register: '' }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 1, class: vr256x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 2, class: vr256x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 3, class: vr256x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+# AVX: %0 = IMPLICIT_DEF
+# AVX-NEXT: undef %2.sub_xmm = COPY %0
+# AVX-NEXT: %3 = VINSERTF128rr %2, %0, 1
+# AVX-NEXT: %1 = COPY %3
+# AVX-NEXT: %ymm0 = COPY %1
+# AVX-NEXT: RET 0, implicit %ymm0
+#
+# AVX512VL: %0 = IMPLICIT_DEF
+# AVX512VL-NEXT: undef %2.sub_xmm = COPY %0
+# AVX512VL-NEXT: %3 = VINSERTF32x4Z256rr %2, %0, 1
+# AVX512VL-NEXT: %1 = COPY %3
+# AVX512VL-NEXT: %ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit %ymm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<4 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>)
+ %ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir b/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
new file mode 100644
index 0000000000000..a072d582e505f
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+--- |
+ define void @test_merge_v128() {
+ ret void
+ }
+
+ define void @test_merge_v256() {
+ ret void
+ }
+
+...
+---
+name: test_merge_v128
+# ALL-LABEL: name: test_merge_v128
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr128x, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 4, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 5, class: vr512, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+# ALL: %0 = IMPLICIT_DEF
+# ALL-NEXT: undef %2.sub_xmm = COPY %0
+# ALL-NEXT: %3 = VINSERTF32x4Zrr %2, %0, 1
+# ALL-NEXT: %4 = VINSERTF32x4Zrr %3, %0, 2
+# ALL-NEXT: %5 = VINSERTF32x4Zrr %4, %0, 3
+# ALL-NEXT: %1 = COPY %5
+# ALL-NEXT: %zmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<4 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
+ %zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_merge_v256
+# ALL-LABEL: name: test_merge_v256
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr256x, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: vr512, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+# ALL: %0 = IMPLICIT_DEF
+# ALL-NEXT: undef %2.sub_ymm = COPY %0
+# ALL-NEXT: %3 = VINSERTF64x4Zrr %2, %0, 1
+# ALL-NEXT: %1 = COPY %3
+# ALL-NEXT: %zmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_MERGE_VALUES %0(<8 x s32>), %0(<8 x s32>)
+ %zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/select-or-scalar.mir b/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
new file mode 100644
index 0000000000000..4f7e482078386
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
@@ -0,0 +1,160 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
+ %ret = or i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
+ %ret = or i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
+ %ret = or i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_or_i64(i64 %arg1, i64 %arg2) {
+ %ret = or i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_or_i8
+# ALL-LABEL: name: test_or_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %sil
+# ALL-NEXT: %2 = OR8rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %2
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_OR %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_or_i16
+# ALL-LABEL: name: test_or_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = OR16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_OR %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_or_i32
+# ALL-LABEL: name: test_or_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = OR32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_OR %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_or_i64
+# ALL-LABEL: name: test_or_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = OR64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_OR %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-sub.mir b/test/CodeGen/X86/GlobalISel/select-sub.mir
index 4768a2d93222d..d47f77828c9b0 100644
--- a/test/CodeGen/X86/GlobalISel/select-sub.mir
+++ b/test/CodeGen/X86/GlobalISel/select-sub.mir
@@ -14,16 +14,6 @@
ret i32 %ret
}
- define float @test_sub_float(float %arg1, float %arg2) {
- %ret = fsub float %arg1, %arg2
- ret float %ret
- }
-
- define double @test_sub_double(double %arg1, double %arg2) {
- %ret = fsub double %arg1, %arg2
- ret double %ret
- }
-
define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
%ret = sub <4 x i32> %arg1, %arg2
ret <4 x i32> %ret
@@ -87,73 +77,6 @@ body: |
...
---
-name: test_sub_float
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = SUBSSrr %0, %1
-# AVX-NEXT: %2 = VSUBSSrr %0, %1
-# AVX512F-NEXT: %2 = VSUBSSZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
- %2(s32) = G_FSUB %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
-
-...
----
-name: test_sub_double
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = SUBSDrr %0, %1
-# AVX-NEXT: %2 = VSUBSDrr %0, %1
-# AVX512F-NEXT: %2 = VSUBSDZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
- %2(s64) = G_FSUB %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
-...
----
name: test_sub_v4i32
alignment: 4
legalized: true
diff --git a/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir b/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
new file mode 100644
index 0000000000000..9d03c6a3f1a86
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
@@ -0,0 +1,160 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
+ %ret = xor i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
+ %ret = xor i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
+ %ret = xor i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_xor_i64(i64 %arg1, i64 %arg2) {
+ %ret = xor i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_xor_i8
+# ALL-LABEL: name: test_xor_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %sil
+# ALL-NEXT: %2 = XOR8rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %2
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_XOR %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_xor_i16
+# ALL-LABEL: name: test_xor_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = XOR16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_XOR %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_xor_i32
+# ALL-LABEL: name: test_xor_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = XOR32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_XOR %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_xor_i64
+# ALL-LABEL: name: test_xor_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = XOR64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_XOR %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/xor-scalar.ll b/test/CodeGen/X86/GlobalISel/xor-scalar.ll
new file mode 100644
index 0000000000000..9941db8abd9cc
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/xor-scalar.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_xor_i8:
+; ALL: # BB#0:
+; ALL-NEXT: xorb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = xor i8 %arg1, %arg2
+ ret i8 %ret
+}
+
+define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_xor_i16:
+; ALL: # BB#0:
+; ALL-NEXT: xorw %di, %si
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = xor i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_xor_i32:
+; ALL: # BB#0:
+; ALL-NEXT: xorl %edi, %esi
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = xor i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_xor_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_xor_i64:
+; ALL: # BB#0:
+; ALL-NEXT: xorq %rdi, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ %ret = xor i64 %arg1, %arg2
+ ret i64 %ret
+}
+
diff --git a/test/CodeGen/X86/atom-call-reg-indirect.ll b/test/CodeGen/X86/atom-call-reg-indirect.ll
index 663b6f1eee51f..8045abc7bad65 100644
--- a/test/CodeGen/X86/atom-call-reg-indirect.ll
+++ b/test/CodeGen/X86/atom-call-reg-indirect.ll
@@ -4,6 +4,8 @@
; RUN: llc < %s -mcpu=core2 -mtriple=x86_64-linux | FileCheck -check-prefix=ATOM-NOT64 %s
; RUN: llc < %s -mcpu=slm -mtriple=i686-linux | FileCheck -check-prefix=SLM32 %s
; RUN: llc < %s -mcpu=slm -mtriple=x86_64-linux | FileCheck -check-prefix=SLM64 %s
+; RUN: llc < %s -mcpu=goldmont -mtriple=i686-linux | FileCheck -check-prefix=SLM32 %s
+; RUN: llc < %s -mcpu=goldmont -mtriple=x86_64-linux | FileCheck -check-prefix=SLM64 %s
; fn_ptr.ll
diff --git a/test/CodeGen/X86/atom-fixup-lea2.ll b/test/CodeGen/X86/atom-fixup-lea2.ll
index ec82613887347..68b376ea5cc23 100644
--- a/test/CodeGen/X86/atom-fixup-lea2.ll
+++ b/test/CodeGen/X86/atom-fixup-lea2.ll
@@ -1,4 +1,6 @@
; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
+; RUN: llc < %s -mcpu=goldmont -mtriple=i686-linux | FileCheck %s
+
; CHECK:BB#5
; CHECK-NEXT:leal
; CHECK-NEXT:leal
diff --git a/test/CodeGen/X86/atom-sched.ll b/test/CodeGen/X86/atom-sched.ll
index b81359e2832b9..bddb015a0dd5c 100644
--- a/test/CodeGen/X86/atom-sched.ll
+++ b/test/CodeGen/X86/atom-sched.ll
@@ -1,5 +1,6 @@
; RUN: llc <%s -O2 -mcpu=atom -march=x86 -relocation-model=static | FileCheck -check-prefix=atom %s
; RUN: llc <%s -O2 -mcpu=slm -march=x86 -relocation-model=static | FileCheck -check-prefix=slm %s
+; RUN: llc <%s -O2 -mcpu=goldmont -march=x86 -relocation-model=static | FileCheck -check-prefix=slm %s
; RUN: llc <%s -O2 -mcpu=core2 -march=x86 -relocation-model=static | FileCheck %s
;
diff --git a/test/CodeGen/X86/avx2-arith.ll b/test/CodeGen/X86/avx2-arith.ll
index aec74424b9b27..017f54b40b2d5 100644
--- a/test/CodeGen/X86/avx2-arith.ll
+++ b/test/CodeGen/X86/avx2-arith.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpaddq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <4 x i64> %i, %j
@@ -18,12 +18,12 @@ define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpaddd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <8 x i32> %i, %j
@@ -32,12 +32,12 @@ define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpaddw:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddw:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <16 x i16> %i, %j
@@ -46,12 +46,12 @@ define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpaddb:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddb:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <32 x i8> %i, %j
@@ -60,12 +60,12 @@ define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpsubq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <4 x i64> %i, %j
@@ -74,12 +74,12 @@ define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpsubd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <8 x i32> %i, %j
@@ -88,12 +88,12 @@ define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpsubw:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubw:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <16 x i16> %i, %j
@@ -102,12 +102,12 @@ define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpsubb:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubb:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <32 x i8> %i, %j
@@ -116,12 +116,12 @@ define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpmulld:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmulld:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <8 x i32> %i, %j
@@ -130,12 +130,12 @@ define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpmullw:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmullw:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <16 x i16> %i, %j
@@ -144,7 +144,7 @@ define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v16i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxbw %xmm1, %ymm1
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -157,7 +157,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v16i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbw %xmm1, %ymm1
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -174,7 +174,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vextracti128 $1, %ymm1, %xmm2
; X32-NEXT: vpmovsxbw %xmm2, %ymm2
; X32-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -196,7 +196,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vextracti128 $1, %ymm1, %xmm2
; X64-NEXT: vpmovsxbw %xmm2, %ymm2
; X64-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -222,7 +222,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: mul_v4i64:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlq $32, %ymm0, %ymm2
; X32-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X32-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -234,7 +234,7 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v4i64:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlq $32, %ymm0, %ymm2
; X64-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X64-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -250,12 +250,12 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @mul_const1(<8 x i32> %x) {
; X32-LABEL: mul_const1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -264,12 +264,12 @@ define <8 x i32> @mul_const1(<8 x i32> %x) {
define <4 x i64> @mul_const2(<4 x i64> %x) {
; X32-LABEL: mul_const2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <4 x i64> %x, <i64 4, i64 4, i64 4, i64 4>
@@ -278,12 +278,12 @@ define <4 x i64> @mul_const2(<4 x i64> %x) {
define <16 x i16> @mul_const3(<16 x i16> %x) {
; X32-LABEL: mul_const3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <16 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -292,13 +292,13 @@ define <16 x i16> @mul_const3(<16 x i16> %x) {
define <4 x i64> @mul_const4(<4 x i64> %x) {
; X32-LABEL: mul_const4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -308,12 +308,12 @@ define <4 x i64> @mul_const4(<4 x i64> %x) {
define <8 x i32> @mul_const5(<8 x i32> %x) {
; X32-LABEL: mul_const5:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const5:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorps %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -322,12 +322,12 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6:
-; X32: ## BB#0:
-; X32-NEXT: vpmulld LCPI18_0, %ymm0, %ymm0
+; X32: # BB#0:
+; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const6:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0>
@@ -336,13 +336,13 @@ define <8 x i32> @mul_const6(<8 x i32> %x) {
define <8 x i64> @mul_const7(<8 x i64> %x) {
; X32-LABEL: mul_const7:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X32-NEXT: retl
;
; X64-LABEL: mul_const7:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X64-NEXT: retq
@@ -352,12 +352,12 @@ define <8 x i64> @mul_const7(<8 x i64> %x) {
define <8 x i16> @mul_const8(<8 x i16> %x) {
; X32-LABEL: mul_const8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $3, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-NEXT: retq
%y = mul <8 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -366,14 +366,14 @@ define <8 x i16> @mul_const8(<8 x i16> %x) {
define <8 x i32> @mul_const9(<8 x i32> %x) {
; X32-LABEL: mul_const9:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl $2, %eax
; X32-NEXT: vmovd %eax, %xmm1
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const9:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: movl $2, %eax
; X64-NEXT: vmovd %eax, %xmm1
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
@@ -385,13 +385,13 @@ define <8 x i32> @mul_const9(<8 x i32> %x) {
; %x * 0x01010101
define <4 x i32> @mul_const10(<4 x i32> %x) {
; X32-LABEL: mul_const10:
-; X32: ## BB#0:
-; X32-NEXT: vpbroadcastd LCPI22_0, %xmm1
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const10:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
@@ -402,13 +402,13 @@ define <4 x i32> @mul_const10(<4 x i32> %x) {
; %x * 0x80808080
define <4 x i32> @mul_const11(<4 x i32> %x) {
; X32-LABEL: mul_const11:
-; X32: ## BB#0:
-; X32-NEXT: vpbroadcastd LCPI23_0, %xmm1
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const11:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/avx2-cmp.ll b/test/CodeGen/X86/avx2-cmp.ll
index e2b550383c8d9..2369aa5ac9a00 100644
--- a/test/CodeGen/X86/avx2-cmp.ll
+++ b/test/CodeGen/X86/avx2-cmp.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: v8i32_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v8i32_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <8 x i32> %i, %j
@@ -19,12 +19,12 @@ define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <4 x i64> %i, %j
@@ -34,12 +34,12 @@ define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <16 x i16> %i, %j
@@ -49,12 +49,12 @@ define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <32 x i8> %i, %j
@@ -64,12 +64,12 @@ define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: int256_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: int256_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <8 x i32> %i, %j
@@ -79,12 +79,12 @@ define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <4 x i64> %i, %j
@@ -94,12 +94,12 @@ define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <16 x i16> %i, %j
@@ -109,12 +109,12 @@ define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <32 x i8> %i, %j
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index 26edafbdb64fd..60cc2cf199e64 100755
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -1,21 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-LABEL: trunc4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: trunc4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
@@ -24,18 +24,18 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: trunc8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <8 x i32> %A to <8 x i16>
@@ -44,12 +44,12 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
; X32-LABEL: sext4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxdq %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <4 x i32> %A to <4 x i64>
@@ -58,12 +58,12 @@ define <4 x i64> @sext4(<4 x i32> %A) nounwind {
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
; X32-LABEL: sext8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <8 x i16> %A to <8 x i32>
@@ -72,12 +72,12 @@ define <8 x i32> @sext8(<8 x i16> %A) nounwind {
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
; X32-LABEL: zext4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: zext4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%B = zext <4 x i32> %A to <4 x i64>
@@ -86,12 +86,12 @@ define <4 x i64> @zext4(<4 x i32> %A) nounwind {
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
; X32-LABEL: zext8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
%B = zext <8 x i16> %A to <8 x i32>
@@ -100,13 +100,13 @@ define <8 x i32> @zext8(<8 x i16> %A) nounwind {
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
; X32-LABEL: zext_8i8_8i32:
-; X32: ## BB#0:
-; X32-NEXT: vpand LCPI6_0, %xmm0, %xmm0
+; X32: # BB#0:
+; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_8i8_8i32:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
@@ -116,12 +116,12 @@ define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: zext_16i8_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_16i8_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT: retq
%t = zext <16 x i8> %z to <16 x i16>
@@ -130,12 +130,12 @@ define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: sext_16i8_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_16i8_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: retq
%t = sext <16 x i8> %z to <16 x i16>
@@ -144,7 +144,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X32-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -154,7 +154,7 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-NEXT: retl
;
; X64-LABEL: trunc_16i16_16i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X64-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -168,13 +168,13 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; X32-LABEL: load_sext_test1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i32>, <4 x i32>* %ptr
@@ -184,13 +184,13 @@ define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; X32-LABEL: load_sext_test2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i8>, <4 x i8>* %ptr
@@ -200,13 +200,13 @@ define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; X32-LABEL: load_sext_test3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i16>, <4 x i16>* %ptr
@@ -216,13 +216,13 @@ define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; X32-LABEL: load_sext_test4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i16>, <8 x i16>* %ptr
@@ -232,13 +232,13 @@ define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
; X32-LABEL: load_sext_test5:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test5:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i8>, <8 x i8>* %ptr
diff --git a/test/CodeGen/X86/avx2-fma-fneg-combine.ll b/test/CodeGen/X86/avx2-fma-fneg-combine.ll
index 345943bd7303d..019593cc0f804 100644
--- a/test/CodeGen/X86/avx2-fma-fneg-combine.ll
+++ b/test/CodeGen/X86/avx2-fma-fneg-combine.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=X64
; This test checks combinations of FNEG and FMA intrinsics
define <8 x float> @test1(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -24,12 +24,12 @@ declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
@@ -42,14 +42,14 @@ declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a, <4 x float> %b, <4 x
define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
-; X32-NEXT: vbroadcastss LCPI2_0, %xmm1
+; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
; X32-NEXT: vxorps %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; X64-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; X64-NEXT: vxorps %xmm1, %xmm0, %xmm0
@@ -64,12 +64,12 @@ declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a, <4 x float> %b, <4
define <8 x float> @test4(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test4:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -80,14 +80,14 @@ entry:
define <8 x float> @test5(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test5:
-; X32: ## BB#0: ## %entry
-; X32-NEXT: vbroadcastss LCPI4_0, %ymm3
+; X32: # BB#0: # %entry
+; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %ymm3
; X32-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
; X64-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
@@ -103,12 +103,12 @@ declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x f
define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; X32-LABEL: test6:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index d162b4755ee1a..64dd6fa00616d 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
<4 x i32>, <4 x float>, i8) nounwind readonly
define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
@@ -15,7 +15,7 @@ define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x floa
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
@@ -30,7 +30,7 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
@@ -38,7 +38,7 @@ define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x dou
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0
@@ -53,7 +53,7 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps_256:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %ymm2, %ymm2, %ymm2
; X32-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
@@ -61,7 +61,7 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorps %ymm2, %ymm2, %ymm2
; X64-NEXT: vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0
@@ -76,7 +76,7 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd_256:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %ymm2, %ymm2, %ymm2
; X32-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
@@ -84,7 +84,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorpd %ymm2, %ymm2, %ymm2
; X64-NEXT: vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0
@@ -96,7 +96,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_epi32:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -105,7 +105,7 @@ define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_epi32:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -122,7 +122,7 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>
define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_pd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -131,7 +131,7 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_pd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
diff --git a/test/CodeGen/X86/avx2-logic.ll b/test/CodeGen/X86/avx2-logic.ll
index 9208d959a7553..68d486699cbcc 100644
--- a/test/CodeGen/X86/avx2-logic.ll
+++ b/test/CodeGen/X86/avx2-logic.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpandn:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X32-NEXT: vpandn %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpandn:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X64-NEXT: vpandn %ymm0, %ymm1, %ymm0
@@ -26,14 +26,14 @@ entry:
define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpand:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpand:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -47,14 +47,14 @@ entry:
define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpor:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpor:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
@@ -68,14 +68,14 @@ entry:
define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpxor:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpxor:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -89,14 +89,14 @@ entry:
define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
; X32-LABEL: vpblendvb:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $7, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI4_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpblendvb:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $7, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
@@ -107,12 +107,12 @@ define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
define <8 x i32> @allOnes() nounwind {
; X32-LABEL: allOnes:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -120,12 +120,12 @@ define <8 x i32> @allOnes() nounwind {
define <16 x i16> @allOnes2() nounwind {
; X32-LABEL: allOnes2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
diff --git a/test/CodeGen/X86/avx2-phaddsub.ll b/test/CodeGen/X86/avx2-phaddsub.ll
index 9eafac902b862..232a3326fa139 100644
--- a/test/CodeGen/X86/avx2-phaddsub.ll
+++ b/test/CodeGen/X86/avx2-phaddsub.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -20,12 +20,12 @@ define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
@@ -36,12 +36,12 @@ define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -52,12 +52,12 @@ define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
@@ -68,12 +68,12 @@ define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phaddd3(<8 x i32> %x) {
; X32-LABEL: phaddd3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -84,12 +84,12 @@ define <8 x i32> @phaddd3(<8 x i32> %x) {
define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phsubw1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubw1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -100,12 +100,12 @@ define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -116,12 +116,12 @@ define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phsubd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 undef, i32 8, i32 undef, i32 4, i32 6, i32 12, i32 14>
diff --git a/test/CodeGen/X86/avx2-shift.ll b/test/CodeGen/X86/avx2-shift.ll
index 4345bd6f79266..47bbba2c7e08c 100644
--- a/test/CodeGen/X86/avx2-shift.ll
+++ b/test/CodeGen/X86/avx2-shift.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_shl0:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <4 x i32> %x, %y
@@ -18,12 +18,12 @@ define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_shl1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <8 x i32> %x, %y
@@ -32,12 +32,12 @@ define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_shl2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <2 x i64> %x, %y
@@ -46,12 +46,12 @@ define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_shl3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <4 x i64> %x, %y
@@ -60,12 +60,12 @@ define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_srl0:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <4 x i32> %x, %y
@@ -74,12 +74,12 @@ define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_srl1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <8 x i32> %x, %y
@@ -88,12 +88,12 @@ define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_srl2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <2 x i64> %x, %y
@@ -102,12 +102,12 @@ define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_srl3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <4 x i64> %x, %y
@@ -116,12 +116,12 @@ define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_sra0:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = ashr <4 x i32> %x, %y
@@ -130,12 +130,12 @@ define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_sra1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = ashr <8 x i32> %x, %y
@@ -146,12 +146,12 @@ define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift00:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpslld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift00:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpslld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -160,12 +160,12 @@ define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift01:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift01:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -174,12 +174,12 @@ define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift02:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift02:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -190,12 +190,12 @@ define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift03:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift03:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -204,12 +204,12 @@ define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift04:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift04:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -218,12 +218,12 @@ define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift05:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift05:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -234,12 +234,12 @@ define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift06:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrad $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift06:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrad $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -248,12 +248,12 @@ define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift07:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsraw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift07:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsraw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -262,13 +262,13 @@ define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_sra0_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -278,13 +278,13 @@ define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_sra1_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -294,13 +294,13 @@ define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_shl0_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -310,13 +310,13 @@ define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_shl1_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -326,13 +326,13 @@ define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_shl2_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -342,13 +342,13 @@ define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_shl3_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -358,13 +358,13 @@ define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_srl0_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -374,13 +374,13 @@ define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_srl1_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -390,13 +390,13 @@ define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_srl2_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -406,13 +406,13 @@ define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_srl3_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -422,13 +422,13 @@ define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
define <32 x i8> @shl9(<32 x i8> %A) nounwind {
; X32-LABEL: shl9:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI28_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shl9:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -438,13 +438,13 @@ define <32 x i8> @shl9(<32 x i8> %A) nounwind {
define <32 x i8> @shr9(<32 x i8> %A) nounwind {
; X32-LABEL: shr9:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI29_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shr9:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -454,13 +454,13 @@ define <32 x i8> @shr9(<32 x i8> %A) nounwind {
define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8_7:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8_7:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -470,16 +470,16 @@ define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI31_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -492,13 +492,13 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
; X32-LABEL: sext_v16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $8, %ymm0, %ymm0
; X32-NEXT: vpsraw $8, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $8, %ymm0, %ymm0
; X64-NEXT: vpsraw $8, %ymm0, %ymm0
; X64-NEXT: retq
@@ -509,13 +509,13 @@ define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
; X32-LABEL: sext_v8i32:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpslld $16, %ymm0, %ymm0
; X32-NEXT: vpsrad $16, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v8i32:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpslld $16, %ymm0, %ymm0
; X64-NEXT: vpsrad $16, %ymm0, %ymm0
; X64-NEXT: retq
@@ -526,24 +526,24 @@ define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_shl16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: variable_shl16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shl <8 x i16> %lhs, %rhs
@@ -552,24 +552,24 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_ashr16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: variable_ashr16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = ashr <8 x i16> %lhs, %rhs
@@ -578,24 +578,24 @@ define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_lshr16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: variable_lshr16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 45a1cd9750384..127726ea30da1 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
; AVX2 Logical Shift Left
define <16 x i16> @test_sllw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -19,12 +19,12 @@ entry:
define <16 x i16> @test_sllw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -34,12 +34,12 @@ entry:
define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsllw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsllw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -49,11 +49,11 @@ entry:
define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
; X32-LABEL: test_slld_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_slld_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -62,12 +62,12 @@ entry:
define <8 x i32> @test_slld_2(<8 x i32> %InVec) {
; X32-LABEL: test_slld_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -77,14 +77,14 @@ entry:
define <8 x i32> @test_vpslld_var(i32 %shift) {
; X32-LABEL: test_vpslld_var:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X32-NEXT: vpslld %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpslld_var:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X64-NEXT: vpslld %xmm0, %ymm1, %ymm0
@@ -96,12 +96,12 @@ define <8 x i32> @test_vpslld_var(i32 %shift) {
define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
; X32-LABEL: test_slld_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpslld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -111,11 +111,11 @@ entry:
define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -124,12 +124,12 @@ entry:
define <4 x i64> @test_sllq_2(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -139,12 +139,12 @@ entry:
define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsllq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsllq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -156,11 +156,11 @@ entry:
define <16 x i16> @test_sraw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -169,12 +169,12 @@ entry:
define <16 x i16> @test_sraw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsraw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsraw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -184,12 +184,12 @@ entry:
define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsraw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsraw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -199,11 +199,11 @@ entry:
define <8 x i32> @test_srad_1(<8 x i32> %InVec) {
; X32-LABEL: test_srad_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srad_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -212,12 +212,12 @@ entry:
define <8 x i32> @test_srad_2(<8 x i32> %InVec) {
; X32-LABEL: test_srad_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrad $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrad $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -227,12 +227,12 @@ entry:
define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
; X32-LABEL: test_srad_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrad $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -244,11 +244,11 @@ entry:
define <16 x i16> @test_srlw_1(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -257,12 +257,12 @@ entry:
define <16 x i16> @test_srlw_2(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -272,12 +272,12 @@ entry:
define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -287,11 +287,11 @@ entry:
define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
; X32-LABEL: test_srld_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srld_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -300,12 +300,12 @@ entry:
define <8 x i32> @test_srld_2(<8 x i32> %InVec) {
; X32-LABEL: test_srld_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrld $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrld $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -315,12 +315,12 @@ entry:
define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
; X32-LABEL: test_srld_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -330,11 +330,11 @@ entry:
define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -343,12 +343,12 @@ entry:
define <4 x i64> @test_srlq_2(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlq $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlq $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -358,12 +358,12 @@ entry:
define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -373,17 +373,17 @@ entry:
define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X32-LABEL: srl_trunc_and_v4i64:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; X32-NEXT: vpbroadcastd LCPI25_0, %xmm2
+; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm2
; X32-NEXT: vpand %xmm2, %xmm1, %xmm1
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: srl_trunc_and_v4i64:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
@@ -403,24 +403,24 @@ define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: shl_8i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: shl_8i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%shl = shl <8 x i16> %r, %a
@@ -429,7 +429,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: shl_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -443,7 +443,7 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -461,13 +461,13 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: shl_32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsllw $4, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI28_0, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsllw $2, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI28_1, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpaddb %ymm0, %ymm0, %ymm2
@@ -476,7 +476,7 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsllw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -495,24 +495,24 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: ashr_8i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: ashr_8i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%ashr = ashr <8 x i16> %r, %a
@@ -521,7 +521,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: ashr_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -535,7 +535,7 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -553,7 +553,7 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: ashr_32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -581,7 +581,7 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X64-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -613,24 +613,24 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: lshr_8i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: lshr_8i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
@@ -639,7 +639,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: lshr_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -653,7 +653,7 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: lshr_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -671,23 +671,23 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: lshr_32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsrlw $4, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI34_0, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $2, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI34_1, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $1, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI34_2, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: lshr_32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsrlw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
diff --git a/test/CodeGen/X86/avx2-vperm.ll b/test/CodeGen/X86/avx2-vperm.ll
index d0e18550f6a82..d57daafab243f 100755
--- a/test/CodeGen/X86/avx2-vperm.ll
+++ b/test/CodeGen/X86/avx2-vperm.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <8 x i32> @perm_cl_int_8x32(<8 x i32> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_8x32:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X32-NEXT: vpermd %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_8x32:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X64-NEXT: vpermd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -22,13 +22,13 @@ entry:
define <8 x float> @perm_cl_fp_8x32(<8 x float> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_8x32:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_8x32:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -39,12 +39,12 @@ entry:
define <4 x i64> @perm_cl_int_4x64(<4 x i64> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_4x64:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_4x64:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
@@ -54,12 +54,12 @@ entry:
define <4 x double> @perm_cl_fp_4x64(<4 x double> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_4x64:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_4x64:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index d96b5882556d8..7c0f145bb7173 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck --check-prefix=CHECK --check-prefix=AVX512F %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vl | FileCheck --check-prefix=CHECK --check-prefix=AVX512VL %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bw | FileCheck --check-prefix=CHECK --check-prefix=AVX512BW %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512dq | FileCheck --check-prefix=CHECK --check-prefix=AVX512DQ %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512dq -mattr=+avx512bw -mattr=+avx512vl| FileCheck --check-prefix=CHECK --check-prefix=SKX %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512bw,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: addpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -17,7 +17,7 @@ entry:
define <8 x double> @addpd512fold(<8 x double> %y) {
; CHECK-LABEL: addpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -27,7 +27,7 @@ entry:
define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: addps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -37,7 +37,7 @@ entry:
define <16 x float> @addps512fold(<16 x float> %y) {
; CHECK-LABEL: addps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -47,7 +47,7 @@ entry:
define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: subpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -57,7 +57,7 @@ entry:
define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
; CHECK-LABEL: subpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -68,7 +68,7 @@ entry:
define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: subps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -78,7 +78,7 @@ entry:
define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
; CHECK-LABEL: subps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -89,7 +89,7 @@ entry:
define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-LABEL: imulq512:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512F-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -101,7 +101,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq512:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512VL-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -113,7 +113,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq512:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -125,12 +125,12 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq512:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq512:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%z = mul <8 x i64>%x, %y
@@ -139,7 +139,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-LABEL: imulq256:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512F-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -151,7 +151,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq256:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512VL-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -163,7 +163,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq256:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -175,15 +175,15 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq256:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpmullq %ymm0, %ymm1, %ymm0
; SKX-NEXT: retq
%z = mul <4 x i64>%x, %y
@@ -192,7 +192,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-LABEL: imulq128:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -204,7 +204,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq128:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -216,7 +216,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq128:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -228,16 +228,16 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq128:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq128:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpmullq %xmm0, %xmm1, %xmm0
; SKX-NEXT: retq
%z = mul <2 x i64>%x, %y
@@ -246,7 +246,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: mulpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -256,7 +256,7 @@ entry:
define <8 x double> @mulpd512fold(<8 x double> %y) {
; CHECK-LABEL: mulpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -266,7 +266,7 @@ entry:
define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: mulps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -276,7 +276,7 @@ entry:
define <16 x float> @mulps512fold(<16 x float> %y) {
; CHECK-LABEL: mulps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -286,7 +286,7 @@ entry:
define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: divpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -296,7 +296,7 @@ entry:
define <8 x double> @divpd512fold(<8 x double> %y) {
; CHECK-LABEL: divpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -306,7 +306,7 @@ entry:
define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: divps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -316,7 +316,7 @@ entry:
define <16 x float> @divps512fold(<16 x float> %y) {
; CHECK-LABEL: divps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -326,7 +326,7 @@ entry:
define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, %j
@@ -335,7 +335,7 @@ define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <8 x i64>, <8 x i64>* %j, align 4
@@ -345,7 +345,7 @@ define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
; CHECK-LABEL: vpaddq_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -354,7 +354,7 @@ define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
; CHECK-LABEL: vpaddq_broadcast2_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load i64, i64* %j
@@ -372,7 +372,7 @@ define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, %j
@@ -381,7 +381,7 @@ define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <16 x i32>, <16 x i32>* %j, align 4
@@ -391,7 +391,7 @@ define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -400,7 +400,7 @@ define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1}
@@ -413,7 +413,7 @@ define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %ma
define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -426,7 +426,7 @@ define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %m
define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1}
@@ -440,7 +440,7 @@ define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
@@ -453,7 +453,7 @@ define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -467,7 +467,7 @@ define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -480,7 +480,7 @@ define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <8 x i64> %i, %j
@@ -489,7 +489,7 @@ define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <16 x i32> %i, %j
@@ -498,7 +498,7 @@ define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
; CHECK-LABEL: vpmulld_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = mul <16 x i32> %i, %j
@@ -508,7 +508,7 @@ define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
declare float @sqrtf(float) readnone
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtA:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -519,7 +519,7 @@ entry:
declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtB:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -530,7 +530,7 @@ entry:
declare float @llvm.sqrt.f32(float)
define float @sqrtC(float %a) nounwind {
; CHECK-LABEL: sqrtC:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%b = call float @llvm.sqrt.f32(float %a)
@@ -540,7 +540,7 @@ define float @sqrtC(float %a) nounwind {
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
define <16 x float> @sqrtD(<16 x float> %a) nounwind {
; CHECK-LABEL: sqrtD:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vsqrtps %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
@@ -550,7 +550,7 @@ define <16 x float> @sqrtD(<16 x float> %a) nounwind {
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
define <8 x double> @sqrtE(<8 x double> %a) nounwind {
; CHECK-LABEL: sqrtE:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vsqrtpd %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
@@ -559,7 +559,7 @@ define <8 x double> @sqrtE(<8 x double> %a) nounwind {
define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
; CHECK-LABEL: fadd_broadcast:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = fadd <16 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -568,7 +568,7 @@ define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
; CHECK-LABEL: addq_broadcast:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = add <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -577,27 +577,27 @@ define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; AVX512F-LABEL: orq_broadcast:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: orq_broadcast:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: orq_broadcast:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: orq_broadcast:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: orq_broadcast:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
%b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -606,27 +606,27 @@ define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; AVX512F-LABEL: andd512fold:
-; AVX512F: ## BB#0: ## %entry
+; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpandd (%rdi), %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andd512fold:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: # BB#0: # %entry
; AVX512VL-NEXT: vpandd (%rdi), %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andd512fold:
-; AVX512BW: ## BB#0: ## %entry
+; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpandd (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andd512fold:
-; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ: # BB#0: # %entry
; AVX512DQ-NEXT: vandps (%rdi), %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andd512fold:
-; SKX: ## BB#0: ## %entry
+; SKX: # BB#0: # %entry
; SKX-NEXT: vandps (%rdi), %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -637,27 +637,27 @@ entry:
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; AVX512F-LABEL: andqbrst:
-; AVX512F: ## BB#0: ## %entry
+; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andqbrst:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: # BB#0: # %entry
; AVX512VL-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andqbrst:
-; AVX512BW: ## BB#0: ## %entry
+; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andqbrst:
-; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ: # BB#0: # %entry
; AVX512DQ-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andqbrst:
-; SKX: ## BB#0: ## %entry
+; SKX: # BB#0: # %entry
; SKX-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -670,7 +670,7 @@ entry:
define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vaddps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1}
@@ -685,7 +685,7 @@ define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmulps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1}
@@ -700,7 +700,7 @@ define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vminps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1}
@@ -716,38 +716,38 @@ define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
-; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512F-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vminpd:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vminpd:
-; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512BW-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vminpd:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vminpd:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpxor %ymm4, %ymm4, %ymm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -763,7 +763,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmaxps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1}
@@ -779,38 +779,38 @@ define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
-; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512F-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vmaxpd:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vmaxpd:
-; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512BW-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vmaxpd:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpxor %ymm4, %ymm4, %ymm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -826,7 +826,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vsubps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1}
@@ -841,7 +841,7 @@ define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vdivps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1}
@@ -856,7 +856,7 @@ define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -871,7 +871,7 @@ define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
; CHECK-LABEL: test_maskz_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -885,7 +885,7 @@ define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_fold_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -901,7 +901,7 @@ define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
; CHECK-LABEL: test_maskz_fold_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -916,7 +916,7 @@ define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load double, double* %j
@@ -929,7 +929,7 @@ define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_broadcast_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm0, %zmm0, %zmm0
; CHECK-NEXT: vpcmpneqq %zmm0, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1}
@@ -948,7 +948,7 @@ define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double>
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
; CHECK-LABEL: test_maskz_broadcast_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
@@ -966,27 +966,27 @@ define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
define <16 x float> @test_fxor(<16 x float> %a) {
; AVX512F-LABEL: test_fxor:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
@@ -996,30 +996,30 @@ define <16 x float> @test_fxor(<16 x float> %a) {
define <8 x float> @test_fxor_8f32(<8 x float> %a) {
; AVX512F-LABEL: test_fxor_8f32:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512F-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor_8f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor_8f32:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512BW-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor_8f32:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512DQ-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor_8f32:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: retq
%res = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -1028,27 +1028,27 @@ define <8 x float> @test_fxor_8f32(<8 x float> %a) {
define <8 x double> @fabs_v8f64(<8 x double> %p)
; AVX512F-LABEL: fabs_v8f64:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v8f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v8f64:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v8f64:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v8f64:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
{
@@ -1059,27 +1059,27 @@ declare <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
define <16 x float> @fabs_v16f32(<16 x float> %p)
; AVX512F-LABEL: fabs_v16f32:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v16f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v16f32:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v16f32:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v16f32:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
{
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 300cb51f871c0..edcc3933bc390 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -1004,8 +1004,6 @@ define i8 @test_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1018,8 +1016,6 @@ define i8 @test_mask_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1058,8 +1054,6 @@ define i8 @test_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1072,8 +1066,6 @@ define i8 @test_mask_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1087,8 +1079,6 @@ define i8 @test_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_pcmpeq_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1101,8 +1091,6 @@ define i8 @test_mask_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1116,10 +1104,6 @@ define i8 @test_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1132,10 +1116,6 @@ define i8 @test_mask_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1149,8 +1129,6 @@ define i8 @test_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_pcmpgt_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1163,8 +1141,6 @@ define i8 @test_mask_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1178,10 +1154,6 @@ define i8 @test_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1194,10 +1166,6 @@ define i8 @test_mask_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5164,23 +5132,11 @@ define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x28,0x37,0xc8]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k4 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xe0,0x02]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xe9]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5219,43 +5175,31 @@ define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x37,0xd0]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k6 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x1f,0xf0,0x02]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x29,0xc1]
+; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k7 {%k6} ## encoding: [0x62,0xf2,0xf5,0x2e,0x37,0xf8]
+; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k1 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1f,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k4 {%k6} ## encoding: [0x62,0xf3,0xf5,0x2e,0x1f,0xe0,0x02]
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k5 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x37,0xe9]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xf9]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
@@ -5283,23 +5227,11 @@ define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc9,0x01]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe1,0x05]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5338,43 +5270,31 @@ define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xd1,0x01]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xf1,0x05]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x29,0xc1]
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k7 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xf9,0x01]
+; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k1 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k4 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xe1,0x05]
+; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k5 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xe9,0x06]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xf9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
@@ -5402,23 +5322,11 @@ define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf1,0x75,0x08,0x66,0xc8]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k4 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xe0,0x02]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xe9]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5457,43 +5365,31 @@ define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k2 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x66,0xd0]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k6 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x1f,0xf0,0x02]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x76,0xc1]
+; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k7 {%k6} ## encoding: [0x62,0xf1,0x75,0x0e,0x66,0xf8]
+; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k1 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1f,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k4 {%k6} ## encoding: [0x62,0xf3,0x75,0x0e,0x1f,0xe0,0x02]
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k5 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x66,0xe9]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xf9]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
@@ -5521,23 +5417,11 @@ define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc9,0x01]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe1,0x05]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5576,43 +5460,31 @@ define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xd1,0x01]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xf1,0x05]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x76,0xc1]
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k7 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xf9,0x01]
+; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k1 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k4 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xe1,0x05]
+; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k5 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xe9,0x06]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xf9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
@@ -5640,35 +5512,11 @@ define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x08,0x37,0xc8]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k4 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xe0,0x02]
-; CHECK-NEXT: kshiftlw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xe9]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5707,57 +5555,33 @@ define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0x37,0xd0]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k6 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x1f,0xf0,0x02]
-; CHECK-NEXT: kshiftlw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xf9]
+; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x29,0xc1]
+; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k6 {%k7} ## encoding: [0x62,0xf2,0xf5,0x0f,0x37,0xf0]
+; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k4 {%k7} ## encoding: [0x62,0xf3,0xf5,0x0f,0x1f,0xe0,0x02]
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x37,0xe9]
; CHECK-NEXT: kshiftlw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0e]
; CHECK-NEXT: kshiftrw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0e]
; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
@@ -5785,35 +5609,11 @@ define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc9,0x01]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe1,0x05]
-; CHECK-NEXT: kshiftlw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe9,0x06]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5852,57 +5652,33 @@ define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xd1,0x01]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xf1,0x05]
-; CHECK-NEXT: kshiftlw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xf9,0x06]
+; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x29,0xc1]
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xf1,0x01]
+; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k4 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xe1,0x05]
+; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xe9,0x06]
; CHECK-NEXT: kshiftlw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0e]
; CHECK-NEXT: kshiftrw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0e]
; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
diff --git a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
new file mode 100644
index 0000000000000..f297fc3db95fa
--- /dev/null
+++ b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -0,0 +1,13485 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s -check-prefix=NoVLX
+
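+; Each test below bitcasts the generic <2 x i64>/<4 x i64>/<8 x i64> arguments
+; to the element type under test, compares them with icmp eq, and widens the
+; resulting i1 vector with a zero-filling shufflevector before bitcasting it
+; to the integer return type. The CHECK lines expect skx to select a single
+; vpcmpeq* into a mask register, read back with the matching kmov.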
+define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
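+; As above, but the 16-lane compare result is widened to i64; only the
+; mask read-back changes, from kmovd to kmovq.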
+define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
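+; 256-bit (ymm) operands produce a 32-lane mask; since ymm registers are
+; touched, the expected sequence ends with vzeroupper before the return.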
+define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
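+; Word-element variants: vpcmpeqw masks of 8, 16, and 32 lanes, widened to
+; each larger i1 vector width, with and without a zeroext mask argument
+; applied through a k-register.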
+define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
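+; 128-bit dword compares: vpcmpeqd on <4 x i32> yields v4i1, which is widened to v8i1 and returned as an i8 via kmovb.
+; The masked variants AND the compare result with the low four bits of the i8 mask argument %__u.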
+define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
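+; The _mask_mem_b variants splat a scalar i32 load; as the CHECK lines show, this folds into the broadcast memory form vpcmpeqd (%rdi){1to4}.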
+define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
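+; As above, but the v4i1 result is widened to v16i1 and returned as an i16 via kmovw.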
+define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
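+; As above, but widened to v32i1 and returned as an i32 via kmovd.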
+define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
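+; As above, but widened to v64i1 and returned as an i64 via kmovq.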
+define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
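+; 256-bit dword compares (v8i1). These tests also carry NoVLX check lines: without AVX512VL the ymm operands are widened to zmm, the compare produces 16 mask bits, and kshiftlw/kshiftrw clear the upper eight.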
+define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
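+; 512-bit dword compares: the <16 x i32> vpcmpeqd result (v16i1) is widened to v32i1 here and to v64i1 further below.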
+define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
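+; 128-bit vpcmpeqq: the v2i1 compare result is widened to v4i1 and returned as
+; i4 (spilled with kmovb and reloaded with movzbl). Register, memory, broadcast,
+; and write-masked variants follow.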
+define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
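+; Same 128-bit vpcmpeqq compares, with the v2i1 result widened to v8i1 (i8).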
+define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
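+; v2i1 widened to v16i1; the i16 result comes straight out of kmovw.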
+define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
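+; v2i1 widened to v32i1; the i32 result comes straight out of kmovd.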
+define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
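+; v2i1 widened to v64i1; the i64 result comes straight out of kmovq.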
+define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
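+; 256-bit vpcmpeqq on ymm operands: v4i1 widened to v8i1 (i8), with vzeroupper
+; before each return.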
+define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
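+; v4i1 widened to v16i1, returned as i16.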
+define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
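+; v4i1 widened to v32i1, returned as i32.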
+define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
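+; v4i1 widened to v64i1, returned as i64.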
+define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
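+; 512-bit vpcmpeqq on zmm operands: v8i1 widened to v16i1 (i16). Here the full
+; i8 write-mask %__u is used, so no subvector extract of the mask is needed.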
+define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
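+; v8i1 widened to v32i1, returned as i32.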
+define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
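+; Broadcast variants: the scalar load splatted across all eight lanes should
+; fold into the compare as an embedded-broadcast memory operand, (%rdi){1to8}.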
+define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
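+; The same compares widened to a 64-bit scalar mask; the result is read out
+; with kmovq instead of kmovd.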
+define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
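+; Signed greater-than compares on byte elements. The 128- and 256-bit sources
+; should select vpcmpgtb into a mask register, with the v16i1/v32i1 result
+; zero-extended to the wider scalar return type as above.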
+define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
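+; Word-element signed compares (vpcmpgtw) for xmm, ymm and zmm sources,
+; exercising the same v8i1/v16i1/v32i1 mask-widening patterns.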
+define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
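+; Dword-element signed compares (vpcmpgtd). The masked v4i1 forms first
+; extract the low four bits of the i8 mask argument via shufflevector, which
+; should still fold into a single {%k1}-masked compare.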
+define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
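+; Dword broadcast variants fold the splatted scalar operand as {1to4}.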
+define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
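+; Same widening pattern again, this time to an i64 mask read back with kmovq.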
+define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
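+; The 256-bit tests below also carry NoVLX assertions. Without AVX512VL the
+; ymm operands are implicitly widened to zmm (the "kill" comments), compared
+; with the 512-bit vpcmpgtd, and the stray upper mask bits are then cleared
+; by the kshiftlw/kshiftrw pair.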
+define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
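+; v8i1 result widened to an i32 mask. The i8 mask argument %__u is bitcast
+; straight to <8 x i1> and ANDed with the compare, so no subvector extract is
+; needed at this width.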
+define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
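+; As above, but zero-extended to an i64 mask and read back with kmovq.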
+define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
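+; 512-bit compares produce a full v16i1 result, masked by the 16-bit %__u
+; where present, then widened to an i32 mask.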
+define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
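+; v16i1 widened to an i64 mask: every shuffle index past position 15 selects
+; a lane of the zeroinitializer operand.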
+define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
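+; vpcmpgtq tests start here. The operands are already <2 x i64>, so the
+; leading bitcasts are identity casts (presumably retained from the
+; front-end output). There is no 4-bit kmov, so the i4 result is returned
+; via a stack spill (kmovb to the stack followed by movzbl).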
+define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
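+; v2i1 widened to i8; at this width kmovb can copy the mask register
+; directly into %eax.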
+define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
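+; v2i1 widened to i16, read back with kmovw.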
+define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
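+; Widening the 2-bit compare result to i32 (and to i64 further down) only
+; changes the mask move: kmovd %k0, %eax here and kmovq %k0, %rax for the
+; i64 variants, instead of the kmovw used for the i16 results above.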
+define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
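+; The v4i1 tests switch from 128-bit xmm to 256-bit ymm operands, so the
+; CHECK lines also expect a vzeroupper before each return.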
+define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
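+; The v8i1 tests compare full 512-bit zmm vectors, so the i8 mask %__u covers
+; all eight lanes and is applied with a plain 'and <8 x i1>' instead of the
+; low-lane extract shufflevector used by the narrower variants above.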
+define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
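+; There is no direct signed greater-or-equal compare: for 'icmp sge' the
+; register-form CHECK lines expect vpcmpleb with the operands swapped, while
+; the memory forms use vpcmpnltb because a memory operand cannot be commuted.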
+define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
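+; Same <16 x i8> compares, but widened to <64 x i1>; the expected output
+; reads the full mask with kmovq into a 64-bit result.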
+define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
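+; 256-bit (<32 x i8>) variants: the compare runs on ymm registers and
+; produces all 32 mask bits directly; because ymm state was touched, the
+; CHECK lines expect vzeroupper before each return.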
+define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleb %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltb (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleb %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltb (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
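+; Word-element tests: <8 x i16> 'icmp sge' lowered to vpcmplew (register,
+; operands swapped) or vpcmpnltw (folded load), with the <8 x i1> result
+; widened through a shuffle with zeroinitializer before the integer
+; bitcast, following the same scheme as the byte tests above.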
+define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
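+; 512-bit word variants: <32 x i16> compares on zmm registers; the full
+; 32-bit mask is widened to <64 x i1> in IR, and the expected code reads
+; it out with kmovq followed by vzeroupper.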
+define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
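+; Dword-element tests: <4 x i32> 'icmp sge' lowered to vpcmpled /
+; vpcmpnltd. Because only 4 mask bits are live, the masked variants
+; bitcast the i8 mask argument to <8 x i1> and extract its low four
+; lanes with a shufflevector (%extract.i) before the AND; i8 results are
+; read out with kmovb.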
+define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
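+; The _mask_mem_b variants compare against a scalar i32 splatted via
+; insertelement + shufflevector. In the expected output the splat stays a
+; separate vpbroadcastd from memory feeding vpcmpled, rather than being
+; folded into the compare as an embedded broadcast.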
+define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
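+; From here the tests also carry a NoVLX prefix: without AVX512VL the
+; 256-bit inputs are implicitly widened to zmm (the '## kill' comments),
+; the compare runs as vpcmpled on zmm, and kshiftlw/kshiftrw by 8 clear
+; the upper half of the 16-bit mask so only the 8 live bits survive.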
+define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
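+; Widening the same v8i1 compare to an i64 result only changes the final
+; k-register move to kmovq %k0, %rax.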
+define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
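+; The v16i1 tests use 512-bit sources, so the compare is natively a zmm
+; vpcmpled/vpcmpnltd and the lowering is identical with or without VLX.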
+define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+

+define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
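+; The v2i1 qword tests return an i4 mask; there is no 4-bit k-register move,
+; so the mask is spilled with kmovb and reloaded with movzbl from the stack.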
+define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
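+; The mask register is copied out with kmovb/kmovw/kmovd/kmovq to match the
+; i8/i16/i32/i64 return width of each test.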
+define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
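+; For the 512-bit (zmm) qword compares below, the compare already produces
+; exactly 8 mask bits, so the i8 mask argument is bitcast to <8 x i1> and
+; ANDed directly; no shufflevector extract of a sub-mask is needed.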
+define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
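+; The remaining tests switch to unsigned byte compares: icmp ult on <16 x i8>
+; and <32 x i8> lowers to vpcmpltub, with the i16/i32 mask arguments bitcast
+; to <16 x i1>/<32 x i1> in the masked variants.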
+define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
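+; Unsigned word compares: icmp ult on <8 x i16> lowers to vpcmpltuw, and the
+; 8-bit result is widened to i16/i32 by shuffling in a zero vector.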
+define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
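+; <8 x i16> compares with the result zero-extended to an i32 mask.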
+define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
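+; <8 x i16> compares with the result zero-extended to an i64 mask.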
+define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
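+; <16 x i16> (ymm) compares producing an i32 mask.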
+define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
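+; <16 x i16> compares with the result zero-extended to an i64 mask.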
+define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
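+; <32 x i16> (zmm) compares producing an i64 mask.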
+define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
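+; <4 x i32> (xmm) unsigned-less-than compares; the <4 x i1> result is zero-extended to an i8 mask.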
+define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
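+; Broadcast variants: the scalar i32 memory operand is splatted with the {1to4} modifier.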
+define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
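+; <4 x i32> compares with the result zero-extended to an i16 mask.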
+define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
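+; Broadcast variants of the i16-mask case.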
+define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
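+; <4 x i32> compares with the result zero-extended to an i32 mask.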
+define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
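+; Broadcast variants of the i32-mask case.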
+define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
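+; <4 x i32> compares with the result zero-extended to an i64 mask.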
+define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
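+; Broadcast variants of the i64-mask case.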
+define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
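+; <8 x i32> (ymm) compares producing an i16 mask; without VLX the compare is widened to zmm and the result trimmed with kshiftlw/kshiftrw.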
+define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
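+; Broadcast variants of the <8 x i32> i16-mask case.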
+define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
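+; <8 x i32> compares producing an i32 mask.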
+define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
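+; Broadcast variants of the <8 x i32> i32-mask case.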
+define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
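+; <8 x i32> compares producing an i64 mask.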
+define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
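+; 512-bit variants: the <8 x i64> arguments are reinterpreted as <16 x i32>, so vpcmpltud operates on zmm registers and yields a 16-bit mask, widened to i32 below.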
+define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
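+; Same zmm compares, with the 16-bit mask zero-extended to 64 bits and read back via kmovq.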
+define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
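+; Unsigned quadword compares (vpcmpltuq) on xmm. For the i4 return the mask is stored to a stack slot with kmovb and reloaded with movzbl; the masked forms bitcast the i8 mask argument to <8 x i1> and take its low two lanes before the and.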
+define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
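+; Widening <2 x i1> to i8: shuffle lanes beyond the first two use indices 2..3, i.e. elements of the zeroinitializer operand, and the result is read directly with kmovb.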
+define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
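+; Same pattern widened to a 16-bit mask, read back with kmovw.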
+define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
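+; Same pattern widened to a 32-bit mask, read back with kmovd.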
+define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
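+; 256-bit quadword compares (<4 x i64> on ymm); the masked forms extract the low four bits of the i8 mask with a <4 x i32> shufflevector before the and.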
+define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
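+; The tests below repeat the unsigned-less-than pattern with 512-bit <8 x i64>
+; operands: icmp ult lowers to vpcmpltuq on zmm registers, and the 8-bit k-mask
+; result is widened to i16/i32/i64 via the shufflevector-with-zeroinitializer idiom.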
+define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i32, i16, i32)
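+; fcmp oeq on float vectors lowers to vcmpeqps with a k-register destination; the
+; intrinsic declared above is used by the {sae} variants below, which pass cc=2
+; (le) and i32 8 to request suppress-all-exceptions rounding control.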
+define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
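+; The next three tests also carry NoVLX check lines: without AVX512VL the ymm
+; compare is widened to zmm (the "kill" comments mark the implicit zmm defs) and
+; the upper eight mask bits are cleared with a kshiftlw/kshiftrw pair.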
+define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %1 = bitcast <4 x i64> %__b to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovaps (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <8 x float> undef, float %load, i32 0
+ %1 = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %1 = bitcast <4 x i64> %__b to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <8 x float> undef, float %load, i32 0
+ %1 = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %1 = bitcast <4 x i64> %__b to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <8 x float> undef, float %load, i32 0
+ %1 = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <16 x float> undef, float %load, i32 0
+ %1 = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %0, <16 x float> %1, i32 2, i16 -1, i32 8)
+ %3 = zext i16 %2 to i32
+ ret i32 %3
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+  %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+  %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <16 x float> undef, float %load, i32 0
+ %1 = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <16 x float> %0, %1
+  %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: movzwl %ax, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %0, <16 x float> %1, i32 2, i16 -1, i32 8)
+ %3 = zext i16 %2 to i64
+ ret i64 %3
+}
+
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double>, <8 x double>, i32, i8, i32)
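+; Double-precision analogues: fcmp oeq <2 x double> lowers to vcmpeqpd. Note the
+; i4-result tests spill the mask with kmovb to the stack and reload it with
+; movzbl rather than using a direct k-to-GPR move.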
+define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+
+define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <8 x double> undef, double %load, i32 0
+ %1 = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %0, <8 x double> %1, i32 2, i8 -1, i32 8)
+ %3 = zext i8 %2 to i16
+ ret i16 %3
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <8 x double> undef, double %load, i32 0
+ %1 = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %0, <8 x double> %1, i32 2, i8 -1, i32 8)
+ %3 = zext i8 %2 to i32
+ ret i32 %3
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <8 x double> undef, double %load, i32 0
+ %1 = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %0, <8 x double> %1, i32 2, i8 -1, i32 8)
+ %3 = zext i8 %2 to i64
+ ret i64 %3
+}
+
+
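
Apart from the _sae variants, every test in this file follows one template: bitcast the integer-vector arguments to the compared floating-point type, fcmp oeq to produce an <N x i1> mask, shufflevector that mask together with zeroinitializer to widen it (indices at or above N pick lanes from the zero vector), and bitcast the widened <M x i1> vector to an iM integer. This models zero-extending a compare mask into a wider mask word. A hedged C sketch of the corresponding source-level idiom (intrinsic use and function name illustrative):

    #include <immintrin.h>
    #include <stdint.h>

    /* compare 16 floats for ordered equality, then zero-extend the 16 mask
       bits to 64; the zext is what the <64 x i32> shufflevector models */
    uint64_t cmp_oeq_wide(__m512 a, __m512 b) {
        __mmask16 m = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
        return (uint64_t)m;
    }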
+
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index fd5983df83251..7463f5f6d086a 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -1,11 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-NOSSSE3
-; RUN: llc < %s -mcpu=core2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3
-; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-AVX --check-prefix=CHECK-AVX2
-; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-WIDE-AVX --check-prefix=CHECK-WIDE-AVX2
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-NOSSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-AVX --check-prefix=CHECK-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-WIDE-AVX --check-prefix=CHECK-WIDE-AVX2
declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
diff --git a/test/CodeGen/X86/bswap-wide-int.ll b/test/CodeGen/X86/bswap-wide-int.ll
new file mode 100644
index 0000000000000..db48eb80de4b9
--- /dev/null
+++ b/test/CodeGen/X86/bswap-wide-int.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+movbe | FileCheck %s --check-prefix=X86-MOVBE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+movbe | FileCheck %s --check-prefix=X64-MOVBE
+
+declare i64 @llvm.bswap.i64(i64)
+declare i128 @llvm.bswap.i128(i128)
+declare i256 @llvm.bswap.i256(i256)
+
+define i64 @bswap_i64(i64 %a0) nounwind {
+; X86-LABEL: bswap_i64:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: bswapl %edx
+; X86-NEXT: retl
+;
+; X86-MOVBE-LABEL: bswap_i64:
+; X86-MOVBE: # BB#0:
+; X86-MOVBE-NEXT: movbel {{[0-9]+}}(%esp), %eax
+; X86-MOVBE-NEXT: movbel {{[0-9]+}}(%esp), %edx
+; X86-MOVBE-NEXT: retl
+;
+; X64-LABEL: bswap_i64:
+; X64: # BB#0:
+; X64-NEXT: bswapq %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+;
+; X64-MOVBE-LABEL: bswap_i64:
+; X64-MOVBE: # BB#0:
+; X64-MOVBE-NEXT: bswapq %rdi
+; X64-MOVBE-NEXT: movq %rdi, %rax
+; X64-MOVBE-NEXT: retq
+ %1 = call i64 @llvm.bswap.i64(i64 %a0)
+ ret i64 %1
+}
+
+define i128 @bswap_i128(i128 %a0) nounwind {
+; X86-LABEL: bswap_i128:
+; X86: # BB#0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: bswapl %edi
+; X86-NEXT: bswapl %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %edx, 8(%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edi, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X86-MOVBE-LABEL: bswap_i128:
+; X86-MOVBE: # BB#0:
+; X86-MOVBE-NEXT: pushl %edi
+; X86-MOVBE-NEXT: pushl %esi
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-MOVBE-NEXT: movbel %esi, 12(%eax)
+; X86-MOVBE-NEXT: movbel %edi, 8(%eax)
+; X86-MOVBE-NEXT: movbel %ecx, 4(%eax)
+; X86-MOVBE-NEXT: movbel %edx, (%eax)
+; X86-MOVBE-NEXT: popl %esi
+; X86-MOVBE-NEXT: popl %edi
+; X86-MOVBE-NEXT: retl $4
+;
+; X64-LABEL: bswap_i128:
+; X64: # BB#0:
+; X64-NEXT: bswapq %rsi
+; X64-NEXT: bswapq %rdi
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdi, %rdx
+; X64-NEXT: retq
+;
+; X64-MOVBE-LABEL: bswap_i128:
+; X64-MOVBE: # BB#0:
+; X64-MOVBE-NEXT: bswapq %rsi
+; X64-MOVBE-NEXT: bswapq %rdi
+; X64-MOVBE-NEXT: movq %rsi, %rax
+; X64-MOVBE-NEXT: movq %rdi, %rdx
+; X64-MOVBE-NEXT: retq
+ %1 = call i128 @llvm.bswap.i128(i128 %a0)
+ ret i128 %1
+}
+
+define i256 @bswap_i256(i256 %a0) nounwind {
+; X86-LABEL: bswap_i256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 20(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: retl $4
+;
+; X86-MOVBE-LABEL: bswap_i256:
+; X86-MOVBE: # BB#0:
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 28(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 24(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 20(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 16(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 12(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 8(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 4(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, (%eax)
+; X86-MOVBE-NEXT: retl $4
+;
+; X64-LABEL: bswap_i256:
+; X64: # BB#0:
+; X64-NEXT: bswapq %r8
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: bswapq %rsi
+; X64-NEXT: movq %rsi, 24(%rdi)
+; X64-NEXT: movq %rdx, 16(%rdi)
+; X64-NEXT: movq %rcx, 8(%rdi)
+; X64-NEXT: movq %r8, (%rdi)
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+;
+; X64-MOVBE-LABEL: bswap_i256:
+; X64-MOVBE: # BB#0:
+; X64-MOVBE-NEXT: movbeq %rsi, 24(%rdi)
+; X64-MOVBE-NEXT: movbeq %rdx, 16(%rdi)
+; X64-MOVBE-NEXT: movbeq %rcx, 8(%rdi)
+; X64-MOVBE-NEXT: movbeq %r8, (%rdi)
+; X64-MOVBE-NEXT: movq %rdi, %rax
+; X64-MOVBE-NEXT: retq
+ %1 = call i256 @llvm.bswap.i256(i256 %a0)
+ ret i256 %1
+}
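
The new file checks that bswap of an integer wider than a machine word decomposes recursively: the low half of the result is the byte-swapped high half of the input and vice versa, which is exactly the register shuffle in the X64 checks for bswap_i128 and the reversed-offset stores for i256 (with MOVBE folding the swap into the load or store). A hedged C sketch, assuming Clang/GCC's unsigned __int128 and __builtin_bswap64:

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* bswap on 2N bits = swap the two halves and bswap each half */
    u128 bswap128(u128 x) {
        uint64_t lo = (uint64_t)x;
        uint64_t hi = (uint64_t)(x >> 64);
        return ((u128)__builtin_bswap64(lo) << 64) | __builtin_bswap64(hi);
    }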
diff --git a/test/CodeGen/X86/compress_expand.ll b/test/CodeGen/X86/compress_expand.ll
index e09fcf2a336e9..f62e18869a982 100644
--- a/test/CodeGen/X86/compress_expand.ll
+++ b/test/CodeGen/X86/compress_expand.ll
@@ -265,9 +265,7 @@ define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger)
; SKX: # BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k0
-; SKX-NEXT: kshiftlb $6, %k0, %k0
-; SKX-NEXT: kshiftrb $6, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
@@ -295,9 +293,7 @@ define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
; SKX: # BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k0
-; SKX-NEXT: kshiftlb $6, %k0, %k0
-; SKX-NEXT: kshiftrb $6, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
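
These hunks, like the matching masked_memop.ll hunks later in this patch, drop a kshiftlb/kshiftrb pair that only cleared mask bits the compare can never set: vpcmpeqq on a 128-bit vector populates at most bits 0-1 of the mask register and zeroes the rest, so shifting left then right by 6 was a no-op. In C intrinsic terms (a hedged sketch; names illustrative):

    #include <immintrin.h>

    /* only bits 0-1 of m can be set, so the removed kshift pair, which
       computed (__mmask8)(m << 6) >> 6, always returned m unchanged */
    __mmask8 cmp2(__m128i a, __m128i b) {
        __mmask8 m = _mm_cmpeq_epi64_mask(a, b);
        return m;
    }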
diff --git a/test/CodeGen/X86/cpus.ll b/test/CodeGen/X86/cpus.ll
index 20ce932a184b5..7901858cb5dc6 100644
--- a/test/CodeGen/X86/cpus.ll
+++ b/test/CodeGen/X86/cpus.ll
@@ -18,6 +18,7 @@
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=broadwell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bonnell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=silvermont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=goldmont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=k8 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=opteron 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=athlon64 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
@@ -34,3 +35,3 @@
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=btver1 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=btver2 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=znver1 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
diff --git a/test/CodeGen/X86/fp128-cast.ll b/test/CodeGen/X86/fp128-cast.ll
index 6568f73029e09..6543292c08b4c 100644
--- a/test/CodeGen/X86/fp128-cast.ll
+++ b/test/CodeGen/X86/fp128-cast.ll
@@ -61,10 +61,10 @@ entry:
; X32: retl
;
; X64-LABEL: TestFPToSIF128_I32:
-; X64: movaps vf128(%rip), %xmm0
-; X64-NEXT: callq __fixtfsi
-; X64-NEXT: movl %eax, vi32(%rip)
-; X64: retq
+; X64: movaps vf128(%rip), %xmm0
+; X64-NEXT: callq __fixtfsi
+; X64-NEXT: movl %eax, vi32(%rip)
+; X64: retq
}
define void @TestFPToUIF128_U32() {
@@ -78,10 +78,10 @@ entry:
; X32: retl
;
; X64-LABEL: TestFPToUIF128_U32:
-; X64: movaps vf128(%rip), %xmm0
-; X64-NEXT: callq __fixunstfsi
-; X64-NEXT: movl %eax, vu32(%rip)
-; X64: retq
+; X64: movaps vf128(%rip), %xmm0
+; X64-NEXT: callq __fixunstfsi
+; X64-NEXT: movl %eax, vu32(%rip)
+; X64: retq
}
define void @TestFPToSIF128_I64() {
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index e30772b528bc0..ea7418f4707e2 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -1,13 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-unknown"
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
; SSE2-LABEL: insert_v2f64_z1:
diff --git a/test/CodeGen/X86/lower-vec-shift.ll b/test/CodeGen/X86/lower-vec-shift.ll
index 783cda0a8dd70..8d64baf5f2a46 100644
--- a/test/CodeGen/X86/lower-vec-shift.ll
+++ b/test/CodeGen/X86/lower-vec-shift.ll
@@ -1,8 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
-
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
; Verify that the following shifts are lowered into a sequence of two shifts plus
; a blend. On pre-avx2 targets, instead of scalarizing logical and arithmetic
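
The comment above describes the strategy these tests cover: when a vector shift uses only two distinct per-lane amounts, it can be lowered as two uniform shifts plus a single blend rather than scalarized. A hedged SSE4.1 sketch of the pattern for an <8 x i16> logical right shift by <1,1,1,1,3,3,3,3> (function name and shift amounts illustrative):

    #include <immintrin.h>

    /* lanes 0-3 shifted right by 1, lanes 4-7 by 3: two shifts, one blend */
    __m128i lshr_1_or_3(__m128i x) {
        __m128i lo = _mm_srli_epi16(x, 1);
        __m128i hi = _mm_srli_epi16(x, 3);
        return _mm_blend_epi16(lo, hi, 0xF0); /* take lanes 4-7 from hi */
    }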
diff --git a/test/CodeGen/X86/lower-vec-shuffle-bug.ll b/test/CodeGen/X86/lower-vec-shuffle-bug.ll
index 5918e8045f62f..7a081b5568673 100644
--- a/test/CodeGen/X86/lower-vec-shuffle-bug.ll
+++ b/test/CodeGen/X86/lower-vec-shuffle-bug.ll
@@ -1,8 +1,9 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
define <4 x double> @test1(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -12,7 +13,7 @@ entry:
define <4 x double> @test2(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -22,7 +23,7 @@ entry:
define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -32,7 +33,7 @@ entry:
define <4 x double> @test4(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 3c616e8a9f439..7a2e41e10a370 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -462,9 +462,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
@@ -550,9 +548,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
@@ -601,9 +597,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
; SKX-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -645,9 +639,7 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 9d26aee2e8b88..0e09abf73c8c9 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=AVX2
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
; This tests codegen time inlining/optimization of memcmp
; rdar://6480398
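
The RUN-line split adds an SSE2 configuration on 32-bit x86, and the regenerated checks reflect a tightened expansion: a small fixed-length memcmp is now inlined as one wide load per buffer, a byte swap to restore memory (big-endian) ordering, and a branchless three-way compare, replacing the earlier branchy loadbb/res_block form. A hedged C sketch of what the new length4 code computes, assuming a little-endian target and the GCC/Clang __builtin_bswap32 builtin (helper name illustrative):

    #include <stdint.h>
    #include <string.h>

    /* memcmp(x, y, 4) inlined: load 32 bits from each side, byte-swap so
       the first byte in memory becomes most significant, compare unsigned */
    int memcmp4(const void *x, const void *y) {
        uint32_t a, b;
        memcpy(&a, x, 4);
        memcpy(&b, y, 4);
        a = __builtin_bswap32(a);
        b = __builtin_bswap32(b);
        return a < b ? -1 : (a > b ? 1 : 0);
    }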
@@ -11,60 +12,70 @@
declare i32 @memcmp(i8*, i8*, i64)
define i32 @length2(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length2:
-; X32: # BB#0: # %loadbb
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movzwl (%ecx), %ecx
-; X32-NEXT: movzwl (%eax), %eax
-; X32-NEXT: rolw $8, %cx
-; X32-NEXT: rolw $8, %ax
-; X32-NEXT: movzwl %cx, %ecx
-; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: cmpl %eax, %ecx
-; X32-NEXT: je .LBB0_1
-; X32-NEXT: # BB#2: # %res_block
-; X32-NEXT: movl $-1, %eax
-; X32-NEXT: jb .LBB0_4
-; X32-NEXT: # BB#3: # %res_block
-; X32-NEXT: movl $1, %eax
-; X32-NEXT: .LBB0_4: # %endblock
-; X32-NEXT: retl
-; X32-NEXT: .LBB0_1:
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: retl
+; X86-NOSSE-LABEL: length2:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movzwl (%ecx), %ecx
+; X86-NOSSE-NEXT: movzwl (%eax), %eax
+; X86-NOSSE-NEXT: rolw $8, %cx
+; X86-NOSSE-NEXT: rolw $8, %ax
+; X86-NOSSE-NEXT: cmpw %ax, %cx
+; X86-NOSSE-NEXT: movl $-1, %eax
+; X86-NOSSE-NEXT: jae .LBB0_1
+; X86-NOSSE-NEXT: # BB#2:
+; X86-NOSSE-NEXT: je .LBB0_3
+; X86-NOSSE-NEXT: .LBB0_4:
+; X86-NOSSE-NEXT: retl
+; X86-NOSSE-NEXT: .LBB0_1:
+; X86-NOSSE-NEXT: movl $1, %eax
+; X86-NOSSE-NEXT: jne .LBB0_4
+; X86-NOSSE-NEXT: .LBB0_3:
+; X86-NOSSE-NEXT: xorl %eax, %eax
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length2:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movzwl (%ecx), %ecx
+; X86-SSE2-NEXT: movzwl (%eax), %eax
+; X86-SSE2-NEXT: rolw $8, %cx
+; X86-SSE2-NEXT: rolw $8, %ax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: cmpw %ax, %cx
+; X86-SSE2-NEXT: movl $-1, %ecx
+; X86-SSE2-NEXT: movl $1, %eax
+; X86-SSE2-NEXT: cmovbl %ecx, %eax
+; X86-SSE2-NEXT: cmovel %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: length2:
-; X64: # BB#0: # %loadbb
+; X64: # BB#0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzwl (%rsi), %ecx
; X64-NEXT: rolw $8, %ax
; X64-NEXT: rolw $8, %cx
-; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: movzwl %cx, %ecx
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB0_1
-; X64-NEXT: # BB#2: # %res_block
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpw %cx, %ax
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovbl %ecx, %eax
-; X64-NEXT: retq
-; X64-NEXT: .LBB0_1:
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
ret i32 %m
}
define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length2_eq:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movzwl (%ecx), %ecx
-; X32-NEXT: cmpw (%eax), %cx
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length2_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length2_eq:
; X64: # BB#0:
@@ -78,13 +89,13 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
}
define i1 @length2_eq_const(i8* %X) nounwind {
-; X32-LABEL: length2_eq_const:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movzwl (%eax), %eax
-; X32-NEXT: cmpl $12849, %eax # imm = 0x3231
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length2_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length2_eq_const:
; X64: # BB#0:
@@ -98,17 +109,17 @@ define i1 @length2_eq_const(i8* %X) nounwind {
}
define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length2_eq_nobuiltin_attr:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $2
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length2_eq_nobuiltin_attr:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $2
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length2_eq_nobuiltin_attr:
; X64: # BB#0:
@@ -125,15 +136,15 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
}
define i32 @length3(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length3:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $3
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length3:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $3
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length3:
; X64: # BB#0:
@@ -144,17 +155,17 @@ define i32 @length3(i8* %X, i8* %Y) nounwind {
}
define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length3_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $3
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length3_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $3
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length3_eq:
; X64: # BB#0:
@@ -171,56 +182,70 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length4(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length4:
-; X32: # BB#0: # %loadbb
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl (%ecx), %ecx
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: bswapl %ecx
-; X32-NEXT: bswapl %eax
-; X32-NEXT: cmpl %eax, %ecx
-; X32-NEXT: je .LBB6_1
-; X32-NEXT: # BB#2: # %res_block
-; X32-NEXT: movl $-1, %eax
-; X32-NEXT: jb .LBB6_4
-; X32-NEXT: # BB#3: # %res_block
-; X32-NEXT: movl $1, %eax
-; X32-NEXT: .LBB6_4: # %endblock
-; X32-NEXT: retl
-; X32-NEXT: .LBB6_1:
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: retl
+; X86-NOSSE-LABEL: length4:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl (%ecx), %ecx
+; X86-NOSSE-NEXT: movl (%eax), %eax
+; X86-NOSSE-NEXT: bswapl %ecx
+; X86-NOSSE-NEXT: bswapl %eax
+; X86-NOSSE-NEXT: cmpl %eax, %ecx
+; X86-NOSSE-NEXT: movl $-1, %eax
+; X86-NOSSE-NEXT: jae .LBB6_1
+; X86-NOSSE-NEXT: # BB#2:
+; X86-NOSSE-NEXT: je .LBB6_3
+; X86-NOSSE-NEXT: .LBB6_4:
+; X86-NOSSE-NEXT: retl
+; X86-NOSSE-NEXT: .LBB6_1:
+; X86-NOSSE-NEXT: movl $1, %eax
+; X86-NOSSE-NEXT: jne .LBB6_4
+; X86-NOSSE-NEXT: .LBB6_3:
+; X86-NOSSE-NEXT: xorl %eax, %eax
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length4:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %ecx
+; X86-SSE2-NEXT: movl (%eax), %eax
+; X86-SSE2-NEXT: bswapl %ecx
+; X86-SSE2-NEXT: bswapl %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: cmpl %eax, %ecx
+; X86-SSE2-NEXT: movl $-1, %ecx
+; X86-SSE2-NEXT: movl $1, %eax
+; X86-SSE2-NEXT: cmovbl %ecx, %eax
+; X86-SSE2-NEXT: cmovel %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: length4:
-; X64: # BB#0: # %loadbb
+; X64: # BB#0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl (%rsi), %ecx
; X64-NEXT: bswapl %eax
; X64-NEXT: bswapl %ecx
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB6_1
-; X64-NEXT: # BB#2: # %res_block
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %ecx, %eax
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovbl %ecx, %eax
-; X64-NEXT: retq
-; X64-NEXT: .LBB6_1:
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
ret i32 %m
}
define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length4_eq:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl (%ecx), %ecx
-; X32-NEXT: cmpl (%eax), %ecx
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length4_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: cmpl (%eax), %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length4_eq:
; X64: # BB#0:
@@ -234,12 +259,12 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
}
define i1 @length4_eq_const(i8* %X) nounwind {
-; X32-LABEL: length4_eq_const:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length4_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length4_eq_const:
; X64: # BB#0:
@@ -252,15 +277,15 @@ define i1 @length4_eq_const(i8* %X) nounwind {
}
define i32 @length5(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length5:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $5
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length5:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $5
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length5:
; X64: # BB#0:
@@ -271,17 +296,17 @@ define i32 @length5(i8* %X, i8* %Y) nounwind {
}
define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length5_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $5
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length5_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $5
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length5_eq:
; X64: # BB#0:
@@ -298,48 +323,45 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length8(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length8:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $8
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length8:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $8
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length8:
-; X64: # BB#0: # %loadbb
+; X64: # BB#0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movq (%rsi), %rcx
; X64-NEXT: bswapq %rax
; X64-NEXT: bswapq %rcx
+; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB11_1
-; X64-NEXT: # BB#2: # %res_block
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovbl %ecx, %eax
-; X64-NEXT: retq
-; X64-NEXT: .LBB11_1:
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
ret i32 %m
}
define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length8_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $8
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length8_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $8
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length8_eq:
; X64: # BB#0:
@@ -353,17 +375,17 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
}
define i1 @length8_eq_const(i8* %X) nounwind {
-; X32-LABEL: length8_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $8
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length8_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $8
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length8_eq_const:
; X64: # BB#0:
@@ -377,17 +399,17 @@ define i1 @length8_eq_const(i8* %X) nounwind {
}
define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length12_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $12
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length12_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length12_eq:
; X64: # BB#0:
@@ -404,15 +426,15 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length12(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length12:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $12
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length12:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length12:
; X64: # BB#0:
@@ -425,15 +447,15 @@ define i32 @length12(i8* %X, i8* %Y) nounwind {
; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
define i32 @length16(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length16:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $16
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length16:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $16
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length16:
; X64: # BB#0:
@@ -444,86 +466,108 @@ define i32 @length16(i8* %X, i8* %Y) nounwind {
}
define i1 @length16_eq(i8* %x, i8* %y) nounwind {
-; X32-LABEL: length16_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $16
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
-;
-; SSE2-LABEL: length16_eq:
-; SSE2: # BB#0:
-; SSE2-NEXT: movdqu (%rsi), %xmm0
-; SSE2-NEXT: movdqu (%rdi), %xmm1
-; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; SSE2-NEXT: pmovmskb %xmm1, %eax
-; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; SSE2-NEXT: setne %al
-; SSE2-NEXT: retq
-;
-; AVX2-LABEL: length16_eq:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
-; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: retq
+; X86-NOSSE-LABEL: length16_eq:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu (%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length16_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: retq
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
%cmp = icmp ne i32 %call, 0
ret i1 %cmp
}
define i1 @length16_eq_const(i8* %X) nounwind {
-; X32-LABEL: length16_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $16
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
-;
-; SSE2-LABEL: length16_eq_const:
-; SSE2: # BB#0:
-; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pmovmskb %xmm0, %eax
-; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: retq
-;
-; AVX2-LABEL: length16_eq_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: retq
+; X86-NOSSE-LABEL: length16_eq_const:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq_const:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length16_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
%c = icmp eq i32 %m, 0
ret i1 %c
}
define i32 @length32(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length32:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $32
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length32:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length32:
; X64: # BB#0:
@@ -536,90 +580,90 @@ define i32 @length32(i8* %X, i8* %Y) nounwind {
; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
define i1 @length32_eq(i8* %x, i8* %y) nounwind {
-; X32-LABEL: length32_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $32
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
-;
-; SSE2-LABEL: length32_eq:
-; SSE2: # BB#0:
-; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: movl $32, %edx
-; SSE2-NEXT: callq memcmp
-; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: popq %rcx
-; SSE2-NEXT: retq
-;
-; AVX2-LABEL: length32_eq:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
-; AVX2-NEXT: vpmovmskb %ymm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; X86-LABEL: length32_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $32, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
define i1 @length32_eq_const(i8* %X) nounwind {
-; X32-LABEL: length32_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $32
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
-;
-; SSE2-LABEL: length32_eq_const:
-; SSE2: # BB#0:
-; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: movl $.L.str, %esi
-; SSE2-NEXT: movl $32, %edx
-; SSE2-NEXT: callq memcmp
-; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: setne %al
-; SSE2-NEXT: popq %rcx
-; SSE2-NEXT: retq
-;
-; AVX2-LABEL: length32_eq_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpmovmskb %ymm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; X86-LABEL: length32_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $.L.str, %esi
+; X64-SSE2-NEXT: movl $32, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
%c = icmp ne i32 %m, 0
ret i1 %c
}
define i32 @length64(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length64:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $64
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length64:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length64:
; X64: # BB#0:
@@ -630,17 +674,17 @@ define i32 @length64(i8* %X, i8* %Y) nounwind {
}
define i1 @length64_eq(i8* %x, i8* %y) nounwind {
-; X32-LABEL: length64_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $64
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length64_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length64_eq:
; X64: # BB#0:
@@ -657,17 +701,17 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind {
}
define i1 @length64_eq_const(i8* %X) nounwind {
-; X32-LABEL: length64_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $64
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length64_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length64_eq_const:
; X64: # BB#0:
diff --git a/test/CodeGen/X86/palignr.ll b/test/CodeGen/X86/palignr.ll
index 11e97aadb45de..700c9cf5f3afb 100644
--- a/test/CodeGen/X86/palignr.ll
+++ b/test/CodeGen/X86/palignr.ll
@@ -1,132 +1,162 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -march=x86 -mcpu=core2 -mattr=+ssse3 | FileCheck %s
-; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck --check-prefix=CHECK-YONAH %s
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SSE --check-prefix=CHECK-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test1:
-; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test1:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE-LABEL: test1:
+; CHECK-SSE: # BB#0:
+; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
+; CHECK-SSE-NEXT: retl
+;
+; CHECK-AVX-LABEL: test1:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> undef, <4 x i32> < i32 1, i32 2, i32 3, i32 0 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test2:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test2:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test2:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test2:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test2:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 3, i32 4 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test3:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test3:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test3:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test3:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test3:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test4:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test4:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; CHECK-YONAH-NEXT: movapd %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test4:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test4:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test4:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind {
-; CHECK-LABEL: test5:
-; CHECK: # BB#0:
-; CHECK-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test5:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; CHECK-YONAH-NEXT: movapd %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE-LABEL: test5:
+; CHECK-SSE: # BB#0:
+; CHECK-SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE-NEXT: retl
+;
+; CHECK-AVX-LABEL: test5:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm1[1],xmm0[0]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
- ret <4 x float> %C
+ ret <4 x float> %C
}
define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: test6:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test6:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
-; CHECK-YONAH-NEXT: por %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test6:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
+; CHECK-SSE2-NEXT: por %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test6:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test6:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 3, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10 >
- ret <8 x i16> %C
+ ret <8 x i16> %C
}
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: test7:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test7:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
-; CHECK-YONAH-NEXT: por %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test7:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
+; CHECK-SSE2-NEXT: por %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test7:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test7:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 6, i32 undef, i32 8, i32 9, i32 10, i32 11, i32 12 >
- ret <8 x i16> %C
+ ret <8 x i16> %C
}
define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
-; CHECK-LABEL: test8:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test8:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
-; CHECK-YONAH-NEXT: por %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test8:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
+; CHECK-SSE2-NEXT: por %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test8:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test8:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> < i32 5, i32 6, i32 7, i32 undef, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20 >
- ret <16 x i8> %C
+ ret <16 x i8> %C
}
; Check that we don't do unary (circular on single operand) palignr incorrectly.
@@ -134,21 +164,26 @@ define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
; incorrectly. In particular, one of the operands of the palignr node
; was an UNDEF.)
define <8 x i16> @test9(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: test9:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
-;
-; CHECK-YONAH-LABEL: test9:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: movdqa %xmm1, %xmm0
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; CHECK-YONAH-NEXT: por %xmm0, %xmm1
-; CHECK-YONAH-NEXT: movdqa %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSE2-LABEL: test9:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
+; CHECK-SSE2-NEXT: por %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test9:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test9:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %B, <8 x i16> %A, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 >
- ret <8 x i16> %C
+ ret <8 x i16> %C
}
diff --git a/test/CodeGen/X86/peephole-recurrence.mir b/test/CodeGen/X86/peephole-recurrence.mir
new file mode 100644
index 0000000000000..af57a4fd526fd
--- /dev/null
+++ b/test/CodeGen/X86/peephole-recurrence.mir
@@ -0,0 +1,232 @@
+# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt -o - %s | FileCheck %s
+
+--- |
+ define i32 @foo(i32 %a) {
+ bb0:
+ br label %bb1
+
+ bb1: ; preds = %bb7, %bb0
+ %vreg0 = phi i32 [ 0, %bb0 ], [ %vreg3, %bb7 ]
+ %cond0 = icmp eq i32 %a, 0
+ br i1 %cond0, label %bb4, label %bb3
+
+ bb3: ; preds = %bb1
+ br label %bb4
+
+ bb4: ; preds = %bb1, %bb3
+ %vreg5 = phi i32 [ 2, %bb3 ], [ 1, %bb1 ]
+ %cond1 = icmp eq i32 %vreg5, 0
+ br i1 %cond1, label %bb7, label %bb6
+
+ bb6: ; preds = %bb4
+ br label %bb7
+
+ bb7: ; preds = %bb4, %bb6
+ %vreg1 = phi i32 [ 2, %bb6 ], [ 1, %bb4 ]
+ %vreg2 = add i32 %vreg5, %vreg0
+ %vreg3 = add i32 %vreg1, %vreg2
+ %cond2 = icmp slt i32 %vreg3, 10
+ br i1 %cond2, label %bb1, label %bb8
+
+ bb8: ; preds = %bb7
+ ret i32 0
+ }
+
+ define i32 @bar(i32 %a, i32* %p) {
+ bb0:
+ br label %bb1
+
+ bb1: ; preds = %bb7, %bb0
+ %vreg0 = phi i32 [ 0, %bb0 ], [ %vreg3, %bb7 ]
+ %cond0 = icmp eq i32 %a, 0
+ br i1 %cond0, label %bb4, label %bb3
+
+ bb3: ; preds = %bb1
+ br label %bb4
+
+ bb4: ; preds = %bb1, %bb3
+ %vreg5 = phi i32 [ 2, %bb3 ], [ 1, %bb1 ]
+ %cond1 = icmp eq i32 %vreg5, 0
+ br i1 %cond1, label %bb7, label %bb6
+
+ bb6: ; preds = %bb4
+ br label %bb7
+
+ bb7: ; preds = %bb4, %bb6
+ %vreg1 = phi i32 [ 2, %bb6 ], [ 1, %bb4 ]
+ %vreg2 = add i32 %vreg5, %vreg0
+ store i32 %vreg0, i32* %p
+ %vreg3 = add i32 %vreg1, %vreg2
+ %cond2 = icmp slt i32 %vreg3, 10
+ br i1 %cond2, label %bb1, label %bb8
+
+ bb8: ; preds = %bb7
+ ret i32 0
+ }
+
+...
+---
+# There is a recurrence formulated around %0, %10, and %3. Check that the
+# operands of the ADD instructions in bb.5.bb7 are commuted so that the values
+# involved in the recurrence are tied. This removes a redundant copy
+# instruction.
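+#
+# A sketch of the expected rewrite (operand order only; the register numbers
+# are taken from the CHECK lines below):
+#   before: %10 = ADD32rr %1, %0   ; recurrence value %0 is the second operand
+#   after:  %10 = ADD32rr %0, %1   ; commuted so %10 can be tied to %0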
+name: foo
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32, preferred-register: '' }
+ - { id: 1, class: gr32, preferred-register: '' }
+ - { id: 2, class: gr32, preferred-register: '' }
+ - { id: 3, class: gr32, preferred-register: '' }
+ - { id: 4, class: gr32, preferred-register: '' }
+ - { id: 5, class: gr32, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: gr32, preferred-register: '' }
+ - { id: 11, class: gr32, preferred-register: '' }
+ - { id: 12, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '%edi', virtual-reg: '%4' }
+body: |
+ bb.0.bb0:
+ successors: %bb.1.bb1(0x80000000)
+ liveins: %edi
+
+ %4 = COPY %edi
+ %5 = MOV32r0 implicit-def dead %eflags
+
+ bb.1.bb1:
+ successors: %bb.3.bb4(0x30000000), %bb.2.bb3(0x50000000)
+
+ ; CHECK: %0 = PHI %5, %bb.0.bb0, %3, %bb.5.bb7
+ %0 = PHI %5, %bb.0.bb0, %3, %bb.5.bb7
+ %6 = MOV32ri 1
+ TEST32rr %4, %4, implicit-def %eflags
+ JE_1 %bb.3.bb4, implicit %eflags
+ JMP_1 %bb.2.bb3
+
+ bb.2.bb3:
+ successors: %bb.3.bb4(0x80000000)
+
+ %7 = MOV32ri 2
+
+ bb.3.bb4:
+ successors: %bb.5.bb7(0x30000000), %bb.4.bb6(0x50000000)
+
+ %1 = PHI %6, %bb.1.bb1, %7, %bb.2.bb3
+ TEST32rr %1, %1, implicit-def %eflags
+ JE_1 %bb.5.bb7, implicit %eflags
+ JMP_1 %bb.4.bb6
+
+ bb.4.bb6:
+ successors: %bb.5.bb7(0x80000000)
+
+ %9 = MOV32ri 2
+
+ bb.5.bb7:
+ successors: %bb.1.bb1(0x7c000000), %bb.6.bb8(0x04000000)
+
+ %2 = PHI %6, %bb.3.bb4, %9, %bb.4.bb6
+ %10 = ADD32rr %1, %0, implicit-def dead %eflags
+ ; CHECK: %10 = ADD32rr
+ ; CHECK-SAME: %0,
+ ; CHECK-SAME: %1,
+ %3 = ADD32rr %2, killed %10, implicit-def dead %eflags
+ ; CHECK: %3 = ADD32rr
+ ; CHECK-SAME: %10,
+ ; CHECK-SAME: %2,
+ %11 = SUB32ri8 %3, 10, implicit-def %eflags
+ JL_1 %bb.1.bb1, implicit %eflags
+ JMP_1 %bb.6.bb8
+
+ bb.6.bb8:
+ %12 = MOV32r0 implicit-def dead %eflags
+ %eax = COPY %12
+ RET 0, %eax
+
+...
+---
+# Here a recurrence is formulated around %0, %11, and %3, but the operands
+# should not be commuted because %0 has a use outside of the recurrence.
+# Commuting the operands would tie values whose live ranges overlap.
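+#
+# Sketch of what the CHECK lines below pin down: %11 = ADD32rr %1, %0 keeps
+# its operand order because %0 is also read by the MOV32mr store that follows
+# the add, so tying %11 to %0 would create exactly such overlapping ranges.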
+name: bar
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32, preferred-register: '' }
+ - { id: 1, class: gr32, preferred-register: '' }
+ - { id: 2, class: gr32, preferred-register: '' }
+ - { id: 3, class: gr32, preferred-register: '' }
+ - { id: 4, class: gr32, preferred-register: '' }
+ - { id: 5, class: gr64, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: gr32, preferred-register: '' }
+ - { id: 11, class: gr32, preferred-register: '' }
+ - { id: 12, class: gr32, preferred-register: '' }
+ - { id: 13, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '%edi', virtual-reg: '%4' }
+ - { reg: '%rsi', virtual-reg: '%5' }
+body: |
+ bb.0.bb0:
+ successors: %bb.1.bb1(0x80000000)
+ liveins: %edi, %rsi
+
+ %5 = COPY %rsi
+ %4 = COPY %edi
+ %6 = MOV32r0 implicit-def dead %eflags
+
+ bb.1.bb1:
+ successors: %bb.3.bb4(0x30000000), %bb.2.bb3(0x50000000)
+
+ %0 = PHI %6, %bb.0.bb0, %3, %bb.5.bb7
+ ; CHECK: %0 = PHI %6, %bb.0.bb0, %3, %bb.5.bb7
+ %7 = MOV32ri 1
+ TEST32rr %4, %4, implicit-def %eflags
+ JE_1 %bb.3.bb4, implicit %eflags
+ JMP_1 %bb.2.bb3
+
+ bb.2.bb3:
+ successors: %bb.3.bb4(0x80000000)
+
+ %8 = MOV32ri 2
+
+ bb.3.bb4:
+ successors: %bb.5.bb7(0x30000000), %bb.4.bb6(0x50000000)
+
+ %1 = PHI %7, %bb.1.bb1, %8, %bb.2.bb3
+ TEST32rr %1, %1, implicit-def %eflags
+ JE_1 %bb.5.bb7, implicit %eflags
+ JMP_1 %bb.4.bb6
+
+ bb.4.bb6:
+ successors: %bb.5.bb7(0x80000000)
+
+ %10 = MOV32ri 2
+
+ bb.5.bb7:
+ successors: %bb.1.bb1(0x7c000000), %bb.6.bb8(0x04000000)
+
+ %2 = PHI %7, %bb.3.bb4, %10, %bb.4.bb6
+ %11 = ADD32rr %1, %0, implicit-def dead %eflags
+ ; CHECK: %11 = ADD32rr
+ ; CHECK-SAME: %1,
+ ; CHECK-SAME: %0,
+ MOV32mr %5, 1, _, 0, _, %0 :: (store 4 into %ir.p)
+ %3 = ADD32rr %2, killed %11, implicit-def dead %eflags
+ ; CHECK: %3 = ADD32rr
+ ; CHECK-SAME: %2,
+ ; CHECK-SAME: %11,
+ %12 = SUB32ri8 %3, 10, implicit-def %eflags
+ JL_1 %bb.1.bb1, implicit %eflags
+ JMP_1 %bb.6.bb8
+
+ bb.6.bb8:
+ %13 = MOV32r0 implicit-def dead %eflags
+ %eax = COPY %13
+ RET 0, %eax
+
+...
diff --git a/test/CodeGen/X86/sbb.ll b/test/CodeGen/X86/sbb.ll
index bc00fc7c66ad7..414780b2d4e65 100644
--- a/test/CodeGen/X86/sbb.ll
+++ b/test/CodeGen/X86/sbb.ll
@@ -111,6 +111,86 @@ define i8 @i8_select_neg1_or_0_commuted_as_math(i8 %x) {
ret i8 %add
}
+; (X <u Y) ? -1 : 0 --> cmp, sbb
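+; (As a reminder of the idiom: after cmpl, "sbbl %eax, %eax" computes
+; %eax - %eax - CF, so it materializes all-ones exactly when the compare set
+; the carry flag, i.e. when X <u Y, with no branch or cmov.)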
+
+define i32 @ult_select_neg1_or_0(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ult_select_neg1_or_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: sbbl %eax, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ult i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; Swap the predicate and compare operands:
+; (Y >u X) ? -1 : 0 --> cmp, sbb
+
+define i32 @ugt_select_neg1_or_0(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ugt_select_neg1_or_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: cmovbel %ecx, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ugt i32 %y, %x
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; Invert the predicate and effectively swap the select operands:
+; (X >=u Y) ? 0 : -1 --> (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: uge_select_0_or_neg1:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: setae %al
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %cmp = icmp uge i32 %x, %y
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %ext, -1
+ ret i32 %add
+}
+
+; Swap the predicate and compare operands:
+; (Y <=u X) ? 0 : -1 --> (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ule_select_0_or_neg1:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: setbe %al
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ule i32 %y, %x
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %ext, -1
+ ret i32 %add
+}
+
+; Verify that subtract with constant is the same thing.
+; (X >=u Y) ? 0 : -1 --> (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: uge_select_0_or_neg1_sub:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: setae %al
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %cmp = icmp uge i32 %x, %y
+ %ext = zext i1 %cmp to i32
+ %sub = sub i32 %ext, 1
+ ret i32 %sub
+}
+
; Make sure we're creating nodes with the right value types. This would crash.
; https://bugs.llvm.org/show_bug.cgi?id=33560
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
index 2e65bd8c75c77..174a487160c7d 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -279,6 +279,35 @@ define <16 x i32> @shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u(<16 x i32> %a
ret <16 x i32> %c
}
+;FIXME: can do better with vpcompress
+define <8 x i32> @test_v16i32_1_3_5_7_9_11_13_15(<16 x i32> %v) {
+; ALL-LABEL: test_v16i32_1_3_5_7_9_11_13_15:
+; ALL: # BB#0:
+; ALL-NEXT: vextracti32x8 $1, %zmm0, %ymm1
+; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
+; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; ALL-NEXT: retq
+ %res = shufflevector <16 x i32> %v, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i32> %res
+}
+
+;FIXME: can do better with vpcompress
+define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
+; ALL-LABEL: test_v16i32_0_1_2_12:
+; ALL: # BB#0:
+; ALL-NEXT: vpextrd $1, %xmm0, %eax
+; ALL-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
+; ALL-NEXT: vpextrd $2, %xmm0, %eax
+; ALL-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; ALL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %eax
+; ALL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
+ ret <4 x i32> %res
+}
+
define <8 x float> @shuffle_v16f32_extract_256(float* %RET, float* %a) {
; ALL-LABEL: shuffle_v16f32_extract_256:
; ALL: # BB#0:
@@ -290,6 +319,34 @@ define <8 x float> @shuffle_v16f32_extract_256(float* %RET, float* %a) {
ret <8 x float> %v2
}
+;FIXME: can do better with vcompressp
+define <8 x float> @test_v16f32_0_1_2_3_4_6_7_10 (<16 x float> %v) {
+; ALL-LABEL: test_v16f32_0_1_2_3_4_6_7_10:
+; ALL: # BB#0:
+; ALL-NEXT: vextractf32x8 $1, %zmm0, %ymm1
+; ALL-NEXT: vmovsldup {{.*#+}} xmm1 = xmm1[0,0,2,2]
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,7,u]
+; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
+; ALL-NEXT: retq
+ %res = shufflevector <16 x float> %v, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 6, i32 7, i32 10>
+ ret <8 x float> %res
+}
+
+;FIXME: can do better with vcompressp
+define <4 x float> @test_v16f32_0_1_3_6 (<16 x float> %v) {
+; ALL-LABEL: test_v16f32_0_1_3_6:
+; ALL: # BB#0:
+; ALL-NEXT: vextractf32x4 $1, %zmm0, %xmm1
+; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %res = shufflevector <16 x float> %v, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 6>
+ ret <4 x float> %res
+}
+
define <16 x i32> @shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12:
; ALL: # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 30c8d1b2373e4..d0b7e4eb205c6 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2659,3 +2659,91 @@ define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
%shuffle = shufflevector <2 x double> %a, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
ret <8 x double> %shuffle
}
+
+;FIXME: compressp
+define <4 x double> @test_v8f64_2346 (<8 x double> %v) {
+; AVX512F-LABEL: test_v8f64_2346:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
+; AVX512F-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8f64_2346:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
+; AVX512F-32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x double> %v, <8 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 6>
+ ret <4 x double> %res
+}
+
+;FIXME: compressp
+define <2 x double> @test_v8f64_34 (<8 x double> %v) {
+; AVX512F-LABEL: test_v8f64_34:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT: vextractf32x4 $1, %zmm0, %xmm0
+; AVX512F-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8f64_34:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; AVX512F-32-NEXT: vextractf32x4 $1, %zmm0, %xmm0
+; AVX512F-32-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX512F-32-NEXT: vzeroupper
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x double> %v, <8 x double> undef, <2 x i32> <i32 3, i32 4>
+ ret <2 x double> %res
+}
+
+; FIXME: vpcompress
+define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
+; AVX512F-LABEL: test_v8i64_1257:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,2,3]
+; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8i64_1257:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; AVX512F-32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,2,3]
+; AVX512F-32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x i64> %v, <8 x i64> undef, <4 x i32> <i32 1, i32 2, i32 5, i32 7>
+ ret <4 x i64> %res
+}
+
+define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
+; AVX512F-LABEL: test_v8i64_2_5:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8i64_2_5:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextracti32x4 $1, %zmm0, %xmm1
+; AVX512F-32-NEXT: vpextrd $1, %xmm1, %eax
+; AVX512F-32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-32-NEXT: vpextrd $2, %xmm0, %eax
+; AVX512F-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX512F-32-NEXT: vpextrd $3, %xmm0, %eax
+; AVX512F-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512F-32-NEXT: vzeroupper
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
+ ret <2 x i64> %res
+}
diff --git a/test/CodeGen/X86/vector-truncate-combine.ll b/test/CodeGen/X86/vector-truncate-combine.ll
new file mode 100644
index 0000000000000..1a6dac8fa6e41
--- /dev/null
+++ b/test/CodeGen/X86/vector-truncate-combine.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=x86_64-- -O2 -start-after=stack-protector -stop-before=loops %s -o - | FileCheck %s
+
+; This test verifies the fix for PR33368.
+;
+; The expected outcome of the operation is to store bytes 0 and 2 of the incoming
+; parameter into c2 (a 2 x i8 vector). DAGCombine converts shuffles into a
+; sequence of extend and subsequent truncate operations. The bug was that an extension
+; by 4 followed by a truncation by 8 was completely eliminated.
+
+; The test checks for the correct sequence of operations that results from the
+; preservation of the extend/truncate operations mentioned above (2 extend and
+; 3 truncate instructions).
+;
+; NOTE: This operation could be collapsed into a single truncate. Once that is
+; done, this test will have to be adjusted.
+
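+; Schematically (a sketch of the shape only, not the exact DAG nodes):
+;   %e = extend %v   ; widen the lanes
+;   %t = trunc %e    ; narrow them again, to a different width than %v
+; The bug dropped the extend/truncate pair as if it were a no-op even though
+; the widths do not cancel; the checks below require the unpack (extend) and
+; pack (truncate) instructions that realize the preserved pair.
+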
+; CHECK: PUNPCKLBWrr
+; CHECK: PUNPCKLWDrr
+; CHECK: PACKUSWBrr
+; CHECK: PACKUSWBrr
+; CHECK: PACKUSWBrr
+
+define void @test(double %vec.coerce) local_unnamed_addr {
+entry:
+ %c2 = alloca <2 x i8>, align 2
+ %0 = bitcast double %vec.coerce to <8 x i8>
+ %1 = shufflevector <8 x i8> %0, <8 x i8> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
+ %2 = shufflevector <4 x i8> %1, <4 x i8> undef, <2 x i32> <i32 3, i32 0>
+ store volatile <2 x i8> %2, <2 x i8>* %c2, align 2
+ br label %if.end
+
+if.end:
+ %3 = bitcast <2 x i8> %2 to i16
+ ret void
+}
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 6047279bc6ed0..1263605a6dc03 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -194,6 +194,64 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32 x i8> %x3, <32 x i8> %x4, <128 x i8>* %p) {
+; AVX1-LABEL: interleaved_store_vf32_i8_stride4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm5
+; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0]
+; AVX1-NEXT: vandnps %ymm5, %ymm4, %ymm5
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT: vandps %ymm4, %ymm6, %ymm6
+; AVX1-NEXT: vorps %ymm5, %ymm6, %ymm8
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT: vandnps %ymm6, %ymm4, %ymm6
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vandps %ymm4, %ymm5, %ymm5
+; AVX1-NEXT: vorps %ymm6, %ymm5, %ymm9
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vandnps %ymm5, %ymm4, %ymm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT: vandps %ymm4, %ymm6, %ymm6
+; AVX1-NEXT: vorps %ymm5, %ymm6, %ymm5
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm2, %ymm4, %ymm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps %ymm0, 96(%rdi)
+; AVX1-NEXT: vmovaps %ymm5, 64(%rdi)
+; AVX1-NEXT: vmovaps %ymm9, 32(%rdi)
+; AVX1-NEXT: vmovaps %ymm8, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: interleaved_store_vf32_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
diff --git a/test/DebugInfo/COFF/lines-bb-start.ll b/test/DebugInfo/COFF/lines-bb-start.ll
new file mode 100644
index 0000000000000..249b38d34998b
--- /dev/null
+++ b/test/DebugInfo/COFF/lines-bb-start.ll
@@ -0,0 +1,97 @@
+; RUN: llc -O0 < %s | FileCheck %s
+
+source_filename = "t.c"
+target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i386-pc-windows-msvc19.0.24215"
+
+@str_const = internal unnamed_addr constant [4 x i8] c"str\00", align 1
+
+declare i32 @puts(i8*)
+
+; We had a line info quality issue where the LEA for the string constant had no
+; location info, so the .cv_loc directive appeared after it. Now we have logic
+; that tries to emit the first valid location at the top of each MBB.
+
+define void @lea_str_loc(i1 %cond) !dbg !8 {
+entry:
+ br i1 %cond, label %if.then, label %if.end, !dbg !17
+
+if.then: ; preds = %entry
+ br label %return, !dbg !18
+
+if.end: ; preds = %entry
+ %call = call i32 @puts(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @str_const, i32 0, i32 0)), !dbg !19
+ br label %return, !dbg !20
+
+return: ; preds = %if.end, %if.then
+ ret void, !dbg !20
+}
+
+; The t.c:5 line marker should appear immediately after the BB label.
+
+; CHECK-LABEL: _lea_str_loc:
+; CHECK: .cv_loc {{.*}} # t.c:4:5
+; CHECK: jmp LBB{{.*}}
+; CHECK: LBB0_{{.*}}: # %if.end
+; CHECK-NEXT: .cv_loc {{.*}} # t.c:5:3
+; CHECK-NEXT: leal _str_const, %[[reg:[^ ]*]]
+; CHECK-NEXT: movl %[[reg]], (%esp)
+; CHECK-NEXT: calll _puts
+
+define void @instr_no_loc(i1 %cond) !dbg !21 {
+entry:
+ br i1 %cond, label %if.then, label %if.end, !dbg !22
+
+if.then: ; preds = %entry
+ br label %return, !dbg !23
+
+if.end: ; preds = %entry
+ call void asm sideeffect "nop", ""()
+ %call = call i32 @puts(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @str_const, i32 0, i32 0)), !dbg !24
+ br label %return, !dbg !25
+
+return: ; preds = %if.end, %if.then
+ ret void, !dbg !25
+}
+
+; CHECK-LABEL: _instr_no_loc:
+; CHECK: .cv_loc {{.*}} # t.c:4:5
+; CHECK: jmp LBB{{.*}}
+; CHECK: LBB1_{{.*}}: # %if.end
+; CHECK-NEXT: .cv_loc {{.*}} # t.c:5:3
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: leal _str_const, %[[reg:[^ ]*]]
+; CHECK-NEXT: movl %[[reg]], (%esp)
+; CHECK-NEXT: calll _puts
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5, !6}
+!llvm.ident = !{!7}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 ", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "t.c", directory: "C:\5Csrc\5Cllvm-project\5Cbuild", checksumkind: CSK_MD5, checksum: "b32df088e991f1996b4e4deb3855c14b")
+!2 = !{}
+!3 = !{i32 1, !"NumRegisterParameters", i32 0}
+!4 = !{i32 2, !"CodeView", i32 1}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{i32 1, !"wchar_size", i32 2}
+!7 = !{!"clang version 5.0.0 "}
+!8 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 2, type: !9, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null, !11}
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!13 = !DIExpression()
+!14 = !DILocation(line: 2, column: 12, scope: !8)
+!15 = !DILocation(line: 3, column: 7, scope: !16)
+!16 = distinct !DILexicalBlock(scope: !8, file: !1, line: 3, column: 7)
+!17 = !DILocation(line: 3, column: 7, scope: !8)
+!18 = !DILocation(line: 4, column: 5, scope: !16)
+!19 = !DILocation(line: 5, column: 3, scope: !8)
+!20 = !DILocation(line: 6, column: 1, scope: !8)
+!21 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 2, type: !9, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!22 = !DILocation(line: 3, column: 7, scope: !21)
+!23 = !DILocation(line: 4, column: 5, scope: !21)
+!24 = !DILocation(line: 5, column: 3, scope: !21)
+!25 = !DILocation(line: 6, column: 1, scope: !21)
diff --git a/test/DebugInfo/COFF/local-variables.ll b/test/DebugInfo/COFF/local-variables.ll
index c0bac0d174a9d..249b6e1103dba 100644
--- a/test/DebugInfo/COFF/local-variables.ll
+++ b/test/DebugInfo/COFF/local-variables.ll
@@ -193,7 +193,7 @@
; OBJ: ChangeLineOffset: 1
; OBJ: ChangeCodeOffset: 0x35
; OBJ: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0xD, LineOffset: 1}
-; OBJ: ChangeCodeLength: 0xF
+; OBJ: ChangeCodeLength: 0xA
; OBJ: ]
; OBJ: }
; OBJ: Local {
diff --git a/test/DebugInfo/PDB/pdbdump-headers.test b/test/DebugInfo/PDB/pdbdump-headers.test
index 9a4544cce3831..3b7895e06b77d 100644
--- a/test/DebugInfo/PDB/pdbdump-headers.test
+++ b/test/DebugInfo/PDB/pdbdump-headers.test
@@ -114,11 +114,11 @@ ALL-NEXT: referent = 0x1004, mode = pointer, opts = const, kind = ptr
ALL-NEXT: 0x1006 | LF_ARGLIST [size = 12, hash = 194342]
ALL-NEXT: 0x1003: `__vc_attributes::threadingAttribute::threading_e`
ALL-NEXT: 0x1007 | LF_MFUNCTION [size = 28, hash = 254156]
-ALL-NEXT: return type = 1, # args = 0x1006, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1006
ALL-NEXT: class type = 0x1004, this type = 0x1005, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1008 | LF_MFUNCTION [size = 28, hash = 194536]
-ALL-NEXT: return type = 0, # args = 0x1000, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x1004, this type = 0x1005, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1009 | LF_METHODLIST [size = 20, hash = 167492]
@@ -153,17 +153,17 @@ ALL-NEXT: 0x1010 | LF_ARGLIST [size = 16, hash = 134580]
ALL-NEXT: 0x100D: `__vc_attributes::event_receiverAttribute::type_e`
ALL-NEXT: 0x0030 (bool): `bool`
ALL-NEXT: 0x1011 | LF_MFUNCTION [size = 28, hash = 148190]
-ALL-NEXT: return type = 2, # args = 0x1010, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 2, param list = 0x1010
ALL-NEXT: class type = 0x100E, this type = 0x100F, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1012 | LF_ARGLIST [size = 12, hash = 113636]
ALL-NEXT: 0x100D: `__vc_attributes::event_receiverAttribute::type_e`
ALL-NEXT: 0x1013 | LF_MFUNCTION [size = 28, hash = 53336]
-ALL-NEXT: return type = 1, # args = 0x1012, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1012
ALL-NEXT: class type = 0x100E, this type = 0x100F, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1014 | LF_MFUNCTION [size = 28, hash = 55779]
-ALL-NEXT: return type = 0, # args = 0x1000, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x100E, this type = 0x100F, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1015 | LF_METHODLIST [size = 28, hash = 220695]
@@ -199,11 +199,11 @@ ALL-NEXT: referent = 0x101A, mode = pointer, opts = const, kind = ptr
ALL-NEXT: 0x101C | LF_ARGLIST [size = 12, hash = 159978]
ALL-NEXT: 0x1019: `__vc_attributes::aggregatableAttribute::type_e`
ALL-NEXT: 0x101D | LF_MFUNCTION [size = 28, hash = 249504]
-ALL-NEXT: return type = 1, # args = 0x101C, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x101C
ALL-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x101E | LF_MFUNCTION [size = 28, hash = 141941]
-ALL-NEXT: return type = 0, # args = 0x1000, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x101F | LF_METHODLIST [size = 20, hash = 238785]
@@ -241,11 +241,11 @@ ALL-NEXT: referent = 0x1025, mode = pointer, opts = const, kind = ptr
ALL-NEXT: 0x1027 | LF_ARGLIST [size = 12, hash = 17744]
ALL-NEXT: 0x1022: `__vc_attributes::event_sourceAttribute::type_e`
ALL-NEXT: 0x1028 | LF_MFUNCTION [size = 28, hash = 239514]
-ALL-NEXT: return type = 1, # args = 0x1027, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1027
ALL-NEXT: class type = 0x1025, this type = 0x1026, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1029 | LF_MFUNCTION [size = 28, hash = 173189]
-ALL-NEXT: return type = 0, # args = 0x1000, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x1025, this type = 0x1026, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x102A | LF_METHODLIST [size = 20, hash = 130544]
@@ -303,17 +303,17 @@ ALL-NEXT: 0x0030 (bool): `bool`
ALL-NEXT: 0x1032: `const char*`
ALL-NEXT: 0x1032: `const char*`
ALL-NEXT: 0x1034 | LF_MFUNCTION [size = 28, hash = 48854]
-ALL-NEXT: return type = 15, # args = 0x1033, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 15, param list = 0x1033
ALL-NEXT: class type = 0x102F, this type = 0x1030, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1035 | LF_ARGLIST [size = 12, hash = 170035]
ALL-NEXT: 0x102E: `__vc_attributes::moduleAttribute::type_e`
ALL-NEXT: 0x1036 | LF_MFUNCTION [size = 28, hash = 177041]
-ALL-NEXT: return type = 1, # args = 0x1035, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1035
ALL-NEXT: class type = 0x102F, this type = 0x1030, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1037 | LF_MFUNCTION [size = 28, hash = 102745]
-ALL-NEXT: return type = 0, # args = 0x1000, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x102F, this type = 0x1030, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1038 | LF_METHODLIST [size = 28, hash = 16947]
@@ -389,7 +389,7 @@ ALL-NEXT: referent = 0x103D, mode = pointer, opts = const, kind = ptr
ALL-NEXT: 0x103F | LF_ARGLIST [size = 12, hash = 49018]
ALL-NEXT: 0x0075 (unsigned): `unsigned`
ALL-NEXT: 0x1040 | LF_MFUNCTION [size = 28, hash = 43821]
-ALL-NEXT: return type = 1, # args = 0x103F, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x103F
ALL-NEXT: class type = 0x103D, this type = 0x103E, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1041 | LF_FIELDLIST [size = 60, hash = 202555]
@@ -422,7 +422,7 @@ ALL-NEXT: referent = 0x1045, mode = pointer, opts = const, kind = ptr
ALL-NEXT: 0x1047 | LF_ARGLIST [size = 12, hash = 103930]
ALL-NEXT: 0x1044: `__vc_attributes::helper_attributes::v1_alttypeAttribute::type_e`
ALL-NEXT: 0x1048 | LF_MFUNCTION [size = 28, hash = 110942]
-ALL-NEXT: return type = 1, # args = 0x1047, param list = 0x0003 (void)
+ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1047
ALL-NEXT: class type = 0x1045, this type = 0x1046, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
ALL-NEXT: 0x1049 | LF_FIELDLIST [size = 64, hash = 17991]
@@ -474,64 +474,64 @@ ALL-NEXT: TI: 0x1000, Offset: 0
ALL: Hash Adjusters:
ALL: Public Symbols
ALL-NEXT: ============================================================
-ALL-NEXT: - S_PUB32 [size = 36] `?__purecall@@3PAXA`
+ALL-NEXT: 0 | S_PUB32 [size = 36] `?__purecall@@3PAXA`
ALL-NEXT: flags = none, addr = 0003:0000
-ALL-NEXT: - S_PUB32 [size = 20] `_main`
+ALL-NEXT: 36 | S_PUB32 [size = 20] `_main`
ALL-NEXT: flags = function, addr = 0001:0016
-ALL-NEXT: - S_PROCREF [size = 20] `main`
+ALL-NEXT: 56 | S_PROCREF [size = 20] `main`
ALL-NEXT: module = 1, sum name = 0, offset = 120
-ALL-NEXT: - S_GDATA32 [size = 28] `__purecall`
+ALL-NEXT: 76 | S_GDATA32 [size = 28] `__purecall`
ALL-NEXT: type = 0x0403 (void*), addr = 0003:0000
ALL: Symbols
ALL-NEXT: ============================================================
ALL-NEXT: Mod 0000 | `d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.obj`:
-ALL-NEXT: - S_OBJNAME [size = 56] sig=0, `d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.obj`
-ALL-NEXT: - S_COMPILE3 [size = 60]
-ALL-NEXT: machine = intel pentium 3, Ver = Microsoft (R) Optimizing Compiler, language = c++
-ALL-NEXT: frontend = 18.0.31101.0, backend = 18.0.31101.0
-ALL-NEXT: flags = security checks
-ALL-NEXT: - S_GPROC32 [size = 44] `main`
-ALL-NEXT: parent = 0, addr = 0001:0016, code size = 10, end = 196
-ALL-NEXT: debug start = 3, debug end = 8, flags = has fp
-ALL-NEXT: - S_FRAMEPROC [size = 32]
-ALL-NEXT: size = 0, padding size = 0, offset to padding = 0
-ALL-NEXT: bytes of callee saved registers = 0, exception handler addr = 0000:0000
-ALL-NEXT: flags = has async eh | opt speed
-ALL-NEXT: - S_END [size = 4]
-ALL-NEXT: - S_BUILDINFO [size = 8] BuildId = `4110`
+ALL-NEXT: 4 | S_OBJNAME [size = 56] sig=0, `d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.obj`
+ALL-NEXT: 60 | S_COMPILE3 [size = 60]
+ALL-NEXT: machine = intel pentium 3, Ver = Microsoft (R) Optimizing Compiler, language = c++
+ALL-NEXT: frontend = 18.0.31101.0, backend = 18.0.31101.0
+ALL-NEXT: flags = security checks
+ALL-NEXT: 120 | S_GPROC32 [size = 44] `main`
+ALL-NEXT: parent = 0, end = 196, addr = 0001:0016, code size = 10
+ALL-NEXT: debug start = 3, debug end = 8, flags = has fp
+ALL-NEXT: 164 | S_FRAMEPROC [size = 32]
+ALL-NEXT: size = 0, padding size = 0, offset to padding = 0
+ALL-NEXT: bytes of callee saved registers = 0, exception handler addr = 0000:0000
+ALL-NEXT: flags = has async eh | opt speed
+ALL-NEXT: 196 | S_END [size = 4]
+ALL-NEXT: 200 | S_BUILDINFO [size = 8] BuildId = `4110`
ALL-NEXT: Mod 0001 | `* Linker *`:
-ALL-NEXT: - S_OBJNAME [size = 20] sig=0, `* Linker *`
-ALL-NEXT: - S_COMPILE3 [size = 48]
-ALL-NEXT: machine = intel 80386, Ver = Microsoft (R) LINK, language = link
-ALL-NEXT: frontend = 0.0.0.0, backend = 12.0.31101.0
-ALL-NEXT: flags = none
-ALL-NEXT: - S_ENVBLOCK [size = 172]
-ALL-NEXT: - cwd
-ALL-NEXT: - d:\src\llvm\test\DebugInfo\PDB\Inputs
-ALL-NEXT: - exe
-ALL-NEXT: - C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\link.exe
-ALL-NEXT: - pdb
-ALL-NEXT: - d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.pdb
-ALL-NEXT: - S_TRAMPOLINE [size = 20]
-ALL-NEXT: type = tramp incremental, size = 5, source = 0001:0005, target = 0001:0005
-ALL-NEXT: - S_SECTION [size = 28] `.text`
-ALL-NEXT: length = 4122, alignment = 12, rva = 4096, section # = 1, characteristics = 1610612768
-ALL-NEXT: - S_COFFGROUP [size = 28] `.text$mn`
-ALL-NEXT: length = 4122, addr = 0001:0000, characteristics = 1610612768
-ALL-NEXT: - S_SECTION [size = 28] `.rdata`
-ALL-NEXT: length = 690, alignment = 12, rva = 12288, section # = 2, characteristics = 1073741888
-ALL-NEXT: - S_COFFGROUP [size = 28] `.rdata`
-ALL-NEXT: length = 323, addr = 0002:0000, characteristics = 1073741888
-ALL-NEXT: - S_COFFGROUP [size = 28] `.edata`
-ALL-NEXT: length = 0, addr = 0002:0323, characteristics = 1073741888
-ALL-NEXT: - S_COFFGROUP [size = 32] `.rdata$debug`
-ALL-NEXT: length = 366, addr = 0002:0324, characteristics = 1073741888
-ALL-NEXT: - S_SECTION [size = 28] `.data`
-ALL-NEXT: length = 4, alignment = 12, rva = 16384, section # = 3, characteristics = 3221225536
-ALL-NEXT: - S_COFFGROUP [size = 24] `.bss`
-ALL-NEXT: length = 4, addr = 0003:0000, characteristics = 3221225600
-ALL-NEXT: - S_SECTION [size = 28] `.reloc`
-ALL-NEXT: length = 8, alignment = 12, rva = 20480, section # = 4, characteristics = 1107296320
+ALL-NEXT: 4 | S_OBJNAME [size = 20] sig=0, `* Linker *`
+ALL-NEXT: 24 | S_COMPILE3 [size = 48]
+ALL-NEXT: machine = intel 80386, Ver = Microsoft (R) LINK, language = link
+ALL-NEXT: frontend = 0.0.0.0, backend = 12.0.31101.0
+ALL-NEXT: flags = none
+ALL-NEXT: 72 | S_ENVBLOCK [size = 172]
+ALL-NEXT: - cwd
+ALL-NEXT: - d:\src\llvm\test\DebugInfo\PDB\Inputs
+ALL-NEXT: - exe
+ALL-NEXT: - C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\link.exe
+ALL-NEXT: - pdb
+ALL-NEXT: - d:\src\llvm\test\DebugInfo\PDB\Inputs\empty.pdb
+ALL-NEXT: 244 | S_TRAMPOLINE [size = 20]
+ALL-NEXT: type = tramp incremental, size = 5, source = 0001:0005, target = 0001:0005
+ALL-NEXT: 264 | S_SECTION [size = 28] `.text`
+ALL-NEXT: length = 4122, alignment = 12, rva = 4096, section # = 1, characteristics = 1610612768
+ALL-NEXT: 292 | S_COFFGROUP [size = 28] `.text$mn`
+ALL-NEXT: length = 4122, addr = 0001:0000, characteristics = 1610612768
+ALL-NEXT: 320 | S_SECTION [size = 28] `.rdata`
+ALL-NEXT: length = 690, alignment = 12, rva = 12288, section # = 2, characteristics = 1073741888
+ALL-NEXT: 348 | S_COFFGROUP [size = 28] `.rdata`
+ALL-NEXT: length = 323, addr = 0002:0000, characteristics = 1073741888
+ALL-NEXT: 376 | S_COFFGROUP [size = 28] `.edata`
+ALL-NEXT: length = 0, addr = 0002:0323, characteristics = 1073741888
+ALL-NEXT: 404 | S_COFFGROUP [size = 32] `.rdata$debug`
+ALL-NEXT: length = 366, addr = 0002:0324, characteristics = 1073741888
+ALL-NEXT: 436 | S_SECTION [size = 28] `.data`
+ALL-NEXT: length = 4, alignment = 12, rva = 16384, section # = 3, characteristics = 3221225536
+ALL-NEXT: 464 | S_COFFGROUP [size = 24] `.bss`
+ALL-NEXT: length = 4, addr = 0003:0000, characteristics = 3221225600
+ALL-NEXT: 488 | S_SECTION [size = 28] `.reloc`
+ALL-NEXT: length = 8, alignment = 12, rva = 20480, section # = 4, characteristics = 1107296320
ALL: Section Contributions
ALL-NEXT: ============================================================
ALL-NEXT: SC | mod = 1, 0001:0000, size = 10, data crc = 0, reloc crc = 0
diff --git a/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test b/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test
index d3b7ae56eaac6..3903c07b027fb 100644
--- a/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test
+++ b/test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test
@@ -24,7 +24,7 @@ TPI-TYPES-NEXT: referent = 0x1003, mode = pointer, opts = None, kind
TPI-TYPES-NEXT: 0x1005 | LF_ARGLIST [size = 12]
TPI-TYPES-NEXT: 0x0074 (int): `int`
TPI-TYPES-NEXT: 0x1006 | LF_MFUNCTION [size = 28]
-TPI-TYPES-NEXT: return type = 1, # args = 0x1005, param list = 0x0003 (void)
+TPI-TYPES-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1005
TPI-TYPES-NEXT: class type = 0x1003, this type = 0x1004, this adjust = 0
TPI-TYPES-NEXT: calling conv = thiscall, options = constructor
TPI-TYPES-NEXT: 0x1007 | LF_PROCEDURE [size = 16]
diff --git a/test/DebugInfo/dwarfdump-accel.test b/test/DebugInfo/dwarfdump-accel.test
index c6a971a2b9aa1..a49d024992c26 100644
--- a/test/DebugInfo/dwarfdump-accel.test
+++ b/test/DebugInfo/dwarfdump-accel.test
@@ -1,4 +1,5 @@
RUN: llvm-dwarfdump %p/Inputs/dwarfdump-objc.x86_64.o | FileCheck %s
+RUN: llvm-dwarfdump -verify %p/Inputs/dwarfdump-objc.x86_64.o | FileCheck %s --check-prefix=VERIFY
Gather some DIE indexes to verify the accelerator table contents.
CHECK: .debug_info contents
@@ -63,3 +64,7 @@ CHECK-NOT: Name
CHECK: {Atom[0]: [[READONLY]]}
CHECK: {Atom[0]: [[ASSIGN]]}
CHECK: {Atom[0]: [[SETASSIGN]]}
+
+Verify the debug info in the apple_names accelerator table.
+VERIFY: Verifying .apple_names
+VERIFY-NEXT: No errors.
diff --git a/test/Feature/optnone-opt.ll b/test/Feature/optnone-opt.ll
index efd35e5660300..6410afb6be99a 100644
--- a/test/Feature/optnone-opt.ll
+++ b/test/Feature/optnone-opt.ll
@@ -2,7 +2,7 @@
; RUN: opt -O1 -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O1
; RUN: opt -O2 -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O1 --check-prefix=OPT-O2O3
; RUN: opt -O3 -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O1 --check-prefix=OPT-O2O3
-; RUN: opt -bb-vectorize -dce -die -gvn-hoist -loweratomic -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-MORE
+; RUN: opt -dce -die -gvn-hoist -loweratomic -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-MORE
; RUN: opt -indvars -licm -loop-deletion -loop-extract -loop-idiom -loop-instsimplify -loop-reduce -loop-reroll -loop-rotate -loop-unroll -loop-unswitch -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-LOOP
; REQUIRES: asserts
@@ -55,7 +55,6 @@ attributes #0 = { optnone noinline }
; OPT-O2O3-DAG: Skipping pass 'SLP Vectorizer'
; Additional IR passes that opt doesn't turn on by default.
-; OPT-MORE-DAG: Skipping pass 'Basic-Block Vectorization'
; OPT-MORE-DAG: Skipping pass 'Dead Code Elimination'
; OPT-MORE-DAG: Skipping pass 'Dead Instruction Elimination'
; OPT-MORE-DAG: Skipping pass 'Lower atomic intrinsics
diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 334e00dabf400..ffb239a152563 100644
--- a/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -535,8 +535,8 @@ entry:
; CHECK: ret i1
-; Check that loads of shadow have the same aligment as the original loads.
-; Check that loads of origin have the aligment of max(4, original alignment).
+; Check that loads of shadow have the same alignment as the original loads.
+; Check that loads of origin have the alignment of max(4, original alignment).
define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
%y = alloca i32, align 64
diff --git a/test/LTO/Resolution/X86/Inputs/comdat-mixed-lto.ll b/test/LTO/Resolution/X86/Inputs/comdat-mixed-lto.ll
new file mode 100644
index 0000000000000..0112b89f98dbe
--- /dev/null
+++ b/test/LTO/Resolution/X86/Inputs/comdat-mixed-lto.ll
@@ -0,0 +1,23 @@
+; ModuleID = 'comdat-mixed-lto1.o'
+source_filename = "comdat-mixed-lto1.cpp"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%"class.Test::ptr" = type { i32 }
+
+$C = comdat any
+
+@C = linkonce_odr global %"class.Test::ptr" zeroinitializer, comdat, align 4
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @__cxx_global_var_init, i8* bitcast (%"class.Test::ptr"* @C to i8*) }]
+
+define void @testglobfunc() #1 section ".text.startup" comdat($C) {
+entry:
+ ret void
+}
+
+; Function Attrs: noinline uwtable
+define internal void @__cxx_global_var_init() #1 section ".text.startup" comdat($C) {
+entry:
+ store i32 0, i32* getelementptr inbounds (%"class.Test::ptr", %"class.Test::ptr"* @C, i32 0, i32 0), align 4
+ ret void
+}
diff --git a/test/LTO/Resolution/X86/comdat-mixed-lto.ll b/test/LTO/Resolution/X86/comdat-mixed-lto.ll
new file mode 100644
index 0000000000000..f6ee22e4161d9
--- /dev/null
+++ b/test/LTO/Resolution/X86/comdat-mixed-lto.ll
@@ -0,0 +1,42 @@
+; Test of comdat handling with mixed ThinLTO and regular LTO compilation.
+
+; This module is compiled with ThinLTO
+; RUN: opt -module-summary -o %t1.o %s
+; Input module compiled for regular LTO
+; RUN: opt -o %t2.o %p/Inputs/comdat-mixed-lto.ll
+
+; The copy of C from this module is prevailing. The copy of C from the
+; regular LTO module is not prevailing, and will be dropped to
+; available_externally.
+; RUN: llvm-lto2 run -r=%t1.o,C,pl -r=%t2.o,C,l -r=%t2.o,testglobfunc,lxp -r=%t1.o,testglobfunc,lx -o %t3 %t1.o %t2.o -save-temps
+
+; The input module (regular LTO) is %t3.0. Check to make sure that we removed
+; __cxx_global_var_init and testglobfunc from the comdat. Also check to ensure
+; that testglobfunc was dropped to available_externally. Otherwise we would
+; get multiply-defined linker errors, as it is no longer in a comdat and
+; would clash with the copy from this module.
+; RUN: llvm-dis %t3.0.0.preopt.bc -o - | FileCheck %s
+; CHECK: define internal void @__cxx_global_var_init() section ".text.startup" {
+; CHECK: define available_externally void @testglobfunc() section ".text.startup" {
+
+; ModuleID = 'comdat-mixed-lto.o'
+source_filename = "comdat-mixed-lto.cpp"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%"class.Test::ptr" = type { i32 }
+
+$C = comdat any
+
+@C = linkonce_odr global %"class.Test::ptr" zeroinitializer, comdat, align 4
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @__cxx_global_var_init, i8* bitcast (%"class.Test::ptr"* @C to i8*) }]
+define void @testglobfunc() #1 section ".text.startup" comdat($C) {
+entry:
+ ret void
+}
+
+; Function Attrs: noinline uwtable
+define internal void @__cxx_global_var_init() #1 section ".text.startup" comdat($C) {
+entry:
+ ret void
+}
diff --git a/test/MC/AArch64/coff-basic.ll b/test/MC/AArch64/coff-basic.ll
new file mode 100644
index 0000000000000..23f06b5360db7
--- /dev/null
+++ b/test/MC/AArch64/coff-basic.ll
@@ -0,0 +1,8 @@
+; RUN: llc -mtriple aarch64-windows < %s | FileCheck %s
+
+define i32 @foo() {
+entry:
+ ret i32 1
+}
+
+; CHECK: .globl foo
diff --git a/test/MC/AMDGPU/code-object-metadata-kernel-args.s b/test/MC/AMDGPU/code-object-metadata-kernel-args.s
index 90915e61f99a4..46cf4f506a5cd 100644
--- a/test/MC/AMDGPU/code-object-metadata-kernel-args.s
+++ b/test/MC/AMDGPU/code-object-metadata-kernel-args.s
@@ -4,7 +4,9 @@
// CHECK: .amdgpu_code_object_metadata
// CHECK: Version: [ 1, 0 ]
-// CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+// CHECK: Printf:
+// CHECK: - '1:1:4:%d\n'
+// CHECK: - '2:1:8:%g\n'
// CHECK: Kernels:
// CHECK: - Name: test_kernel
// CHECK: Language: OpenCL C
diff --git a/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s b/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s
index 9669fcf539395..7884b6672e7e0 100644
--- a/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s
+++ b/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s
@@ -4,7 +4,9 @@
// CHECK: .amdgpu_code_object_metadata
// CHECK: Version: [ 1, 0 ]
-// CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+// CHECK: Printf:
+// CHECK: - '1:1:4:%d\n'
+// CHECK: - '2:1:8:%g\n'
// CHECK: Kernels:
// CHECK: - Name: test_kernel
// CHECK: Language: OpenCL C
diff --git a/test/MC/AVR/out-of-range-fixups/adiw-fail.s b/test/MC/AVR/out-of-range-fixups/adiw-fail.s
deleted file mode 100644
index ab734695c9c57..0000000000000
--- a/test/MC/AVR/out-of-range-fixups/adiw-fail.s
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llvm-mc -triple avr -mattr=avr6 -filetype=obj < %s 2>&1 | FileCheck %s
-
-; CHECK: error: out of range immediate (expected an integer in the range 0 to 63)
-adiw r24, foo+64
-
diff --git a/test/MC/AVR/out-of-range-fixups/in-fail.s b/test/MC/AVR/out-of-range-fixups/in-fail.s
deleted file mode 100644
index b929ead4c5a59..0000000000000
--- a/test/MC/AVR/out-of-range-fixups/in-fail.s
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llvm-mc -triple avr -mattr=avr6 -filetype=obj < %s 2>&1 | FileCheck %s
-
-; CHECK: error: out of range port number (expected an integer in the range 0 to 63)
-in r3, foo+64
-
diff --git a/test/MC/AVR/out-of-range-fixups/lds-fail.s b/test/MC/AVR/out-of-range-fixups/lds-fail.s
deleted file mode 100644
index e28ad3e861bca..0000000000000
--- a/test/MC/AVR/out-of-range-fixups/lds-fail.s
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llvm-mc -triple avr -mattr=avr6 -filetype=obj < %s 2>&1 | FileCheck %s
-
-; CHECK: error: out of range port number (expected an integer in the range 0 to 65535)
-lds r2, foo+65536
-
diff --git a/test/MC/AVR/out-of-range-fixups/sbi-fail.s b/test/MC/AVR/out-of-range-fixups/sbi-fail.s
deleted file mode 100644
index 4f23faacd61f5..0000000000000
--- a/test/MC/AVR/out-of-range-fixups/sbi-fail.s
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llvm-mc -triple avr -mattr=avr6 -filetype=obj < %s 2>&1 | FileCheck %s
-
-; CHECK: error: out of range port number (expected an integer in the range 0 to 31)
-sbi foo+32, 1
-
diff --git a/test/MC/Disassembler/SystemZ/insns.txt b/test/MC/Disassembler/SystemZ/insns.txt
index 75f7f9669b5cd..2ca19363c46b1 100644
--- a/test/MC/Disassembler/SystemZ/insns.txt
+++ b/test/MC/Disassembler/SystemZ/insns.txt
@@ -406,6 +406,36 @@
# CHECK: ah %r15, 0
0x4a 0xf0 0x00 0x00
+# CHECK: ahhhr %r0, %r0, %r0
+0xb9 0xc8 0x00 0x00
+
+# CHECK: ahhhr %r0, %r0, %r15
+0xb9 0xc8 0xf0 0x00
+
+# CHECK: ahhhr %r0, %r15, %r0
+0xb9 0xc8 0x00 0x0f
+
+# CHECK: ahhhr %r15, %r0, %r0
+0xb9 0xc8 0x00 0xf0
+
+# CHECK: ahhhr %r7, %r8, %r9
+0xb9 0xc8 0x90 0x78
+
+# CHECK: ahhlr %r0, %r0, %r0
+0xb9 0xd8 0x00 0x00
+
+# CHECK: ahhlr %r0, %r0, %r15
+0xb9 0xd8 0xf0 0x00
+
+# CHECK: ahhlr %r0, %r15, %r0
+0xb9 0xd8 0x00 0x0f
+
+# CHECK: ahhlr %r15, %r0, %r0
+0xb9 0xd8 0x00 0xf0
+
+# CHECK: ahhlr %r7, %r8, %r9
+0xb9 0xd8 0x90 0x78
+
# CHECK: ahi %r0, -32768
0xa7 0x0a 0x80 0x00
@@ -754,6 +784,36 @@
# CHECK: algsi 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x7e
+# CHECK: alhhhr %r0, %r0, %r0
+0xb9 0xca 0x00 0x00
+
+# CHECK: alhhhr %r0, %r0, %r15
+0xb9 0xca 0xf0 0x00
+
+# CHECK: alhhhr %r0, %r15, %r0
+0xb9 0xca 0x00 0x0f
+
+# CHECK: alhhhr %r15, %r0, %r0
+0xb9 0xca 0x00 0xf0
+
+# CHECK: alhhhr %r7, %r8, %r9
+0xb9 0xca 0x90 0x78
+
+# CHECK: alhhlr %r0, %r0, %r0
+0xb9 0xda 0x00 0x00
+
+# CHECK: alhhlr %r0, %r0, %r15
+0xb9 0xda 0xf0 0x00
+
+# CHECK: alhhlr %r0, %r15, %r0
+0xb9 0xda 0x00 0x0f
+
+# CHECK: alhhlr %r15, %r0, %r0
+0xb9 0xda 0x00 0xf0
+
+# CHECK: alhhlr %r7, %r8, %r9
+0xb9 0xda 0x90 0x78
+
# CHECK: alhsik %r0, %r1, -32768
0xec 0x01 0x80 0x00 0x00 0xda
@@ -826,6 +886,42 @@
# CHECK: alsi 524287(%r15), 42
0xeb 0x2a 0xff 0xff 0x7f 0x6e
+# CHECK: alsih %r0, -2147483648
+0xcc 0x0a 0x80 0x00 0x00 0x00
+
+# CHECK: alsih %r0, -1
+0xcc 0x0a 0xff 0xff 0xff 0xff
+
+# CHECK: alsih %r0, 0
+0xcc 0x0a 0x00 0x00 0x00 0x00
+
+# CHECK: alsih %r0, 1
+0xcc 0x0a 0x00 0x00 0x00 0x01
+
+# CHECK: alsih %r0, 2147483647
+0xcc 0x0a 0x7f 0xff 0xff 0xff
+
+# CHECK: alsih %r15, 0
+0xcc 0xfa 0x00 0x00 0x00 0x00
+
+# CHECK: alsihn %r0, -2147483648
+0xcc 0x0b 0x80 0x00 0x00 0x00
+
+# CHECK: alsihn %r0, -1
+0xcc 0x0b 0xff 0xff 0xff 0xff
+
+# CHECK: alsihn %r0, 0
+0xcc 0x0b 0x00 0x00 0x00 0x00
+
+# CHECK: alsihn %r0, 1
+0xcc 0x0b 0x00 0x00 0x00 0x01
+
+# CHECK: alsihn %r0, 2147483647
+0xcc 0x0b 0x7f 0xff 0xff 0xff
+
+# CHECK: alsihn %r15, 0
+0xcc 0xfb 0x00 0x00 0x00 0x00
+
# CHECK: aly %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x5e
@@ -1126,6 +1222,18 @@
# CHECK: b 4095(%r15,%r1)
0x47 0xff 0x1f 0xff
+# CHECK: bakr %r0, %r0
+0xb2 0x40 0x00 0x00
+
+# CHECK: bakr %r0, %r15
+0xb2 0x40 0x00 0x0f
+
+# CHECK: bakr %r15, %r0
+0xb2 0x40 0x00 0xf0
+
+# CHECK: bakr %r7, %r8
+0xb2 0x40 0x00 0x78
+
# CHECK: bal %r0, 0
0x45 0x00 0x00 0x00
@@ -1387,6 +1495,30 @@
# CHECK: bctr %r15, %r9
0x06 0xf9
+# CHECK: bsa %r0, %r0
+0xb2 0x5a 0x00 0x00
+
+# CHECK: bsa %r0, %r15
+0xb2 0x5a 0x00 0x0f
+
+# CHECK: bsa %r15, %r0
+0xb2 0x5a 0x00 0xf0
+
+# CHECK: bsa %r7, %r8
+0xb2 0x5a 0x00 0x78
+
+# CHECK: bsg %r0, %r0
+0xb2 0x58 0x00 0x00
+
+# CHECK: bsg %r0, %r15
+0xb2 0x58 0x00 0x0f
+
+# CHECK: bsg %r15, %r0
+0xb2 0x58 0x00 0xf0
+
+# CHECK: bsg %r7, %r8
+0xb2 0x58 0x00 0x78
+
# CHECK: bsm %r0, %r1
0x0b 0x01
@@ -3058,6 +3190,18 @@
# CHECK: chf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xcd
+# CHECK: chhr %r0, %r0
+0xb9 0xcd 0x00 0x00
+
+# CHECK: chhr %r0, %r15
+0xb9 0xcd 0x00 0x0f
+
+# CHECK: chhr %r15, %r0
+0xb9 0xcd 0x00 0xf0
+
+# CHECK: chhr %r7, %r8
+0xb9 0xcd 0x00 0x78
+
# CHECK: chhsi 0, 0
0xe5 0x54 0x00 0x00 0x00 0x00
@@ -3109,6 +3253,18 @@
# CHECK: chi %r15, 0
0xa7 0xfe 0x00 0x00
+# CHECK: chlr %r0, %r0
+0xb9 0xdd 0x00 0x00
+
+# CHECK: chlr %r0, %r15
+0xb9 0xdd 0x00 0x0f
+
+# CHECK: chlr %r15, %r0
+0xb9 0xdd 0x00 0xf0
+
+# CHECK: chlr %r7, %r8
+0xb9 0xdd 0x00 0x78
+
# CHECK: chsi 0, 0
0xe5 0x5c 0x00 0x00 0x00 0x00
@@ -3940,6 +4096,18 @@
# CHECK: clhf %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xcf
+# CHECK: clhhr %r0, %r0
+0xb9 0xcf 0x00 0x00
+
+# CHECK: clhhr %r0, %r15
+0xb9 0xcf 0x00 0x0f
+
+# CHECK: clhhr %r15, %r0
+0xb9 0xcf 0x00 0xf0
+
+# CHECK: clhhr %r7, %r8
+0xb9 0xcf 0x00 0x78
+
# CHECK: clhhsi 0, 0
0xe5 0x55 0x00 0x00 0x00 0x00
@@ -3961,6 +4129,18 @@
# CHECK: clhhsi 4095(%r15), 42
0xe5 0x55 0xff 0xff 0x00 0x2a
+# CHECK: clhlr %r0, %r0
+0xb9 0xdf 0x00 0x00
+
+# CHECK: clhlr %r0, %r15
+0xb9 0xdf 0x00 0x0f
+
+# CHECK: clhlr %r15, %r0
+0xb9 0xdf 0x00 0xf0
+
+# CHECK: clhlr %r7, %r8
+0xb9 0xdf 0x00 0x78
+
# CHECK: cli 0, 0
0x95 0x00 0x00 0x00
@@ -4582,6 +4762,24 @@
# CHECK: crb %r0, %r0, 15, 0
0xec 0x00 0x00 0x00 0xf0 0xf6
+# CHECK: crdte %r0, %r0, %r0
+0xb9 0x8f 0x00 0x00
+
+# CHECK: crdte %r0, %r0, %r14
+0xb9 0x8f 0x00 0x0e
+
+# CHECK: crdte %r0, %r15, %r0
+0xb9 0x8f 0xf0 0x00
+
+# CHECK: crdte %r14, %r0, %r0
+0xb9 0x8f 0x00 0xe0
+
+# CHECK: crdte %r0, %r0, %r0, 15
+0xb9 0x8f 0x0f 0x00
+
+# CHECK: crdte %r4, %r5, %r6, 7
+0xb9 0x8f 0x57 0x46
+
# CHECK: crth %r0, %r1
0xb9 0x72 0x20 0x01
@@ -4624,6 +4822,9 @@
# CHECK: cs %r15, %r0, 0
0xba 0xf0 0x00 0x00
+# CHECK: csch
+0xb2 0x30 0x00 0x00
+
# CHECK: csdtr %r0, %f0, 0
0xb3 0xe3 0x00 0x00
@@ -4672,6 +4873,30 @@
# CHECK: csg %r15, %r0, 0
0xeb 0xf0 0x00 0x00 0x00 0x30
+# CHECK: csp %r0, %r0
+0xb2 0x50 0x00 0x00
+
+# CHECK: csp %r0, %r15
+0xb2 0x50 0x00 0x0f
+
+# CHECK: csp %r14, %r0
+0xb2 0x50 0x00 0xe0
+
+# CHECK: csp %r6, %r8
+0xb2 0x50 0x00 0x68
+
+# CHECK: cspg %r0, %r0
+0xb9 0x8a 0x00 0x00
+
+# CHECK: cspg %r0, %r15
+0xb9 0x8a 0x00 0x0f
+
+# CHECK: cspg %r14, %r0
+0xb9 0x8a 0x00 0xe0
+
+# CHECK: cspg %r6, %r8
+0xb9 0x8a 0x00 0x68
+
# CHECK: csst 0, 0, %r0
0xc8 0x02 0x00 0x00 0x00 0x00
@@ -5623,6 +5848,36 @@
# CHECK: der %f15, %f0
0x3d 0xf0
+# CHECK: diag %r0, %r0, 0
+0x83 0x00 0x00 0x00
+
+# CHECK: diag %r0, %r15, 0
+0x83 0x0f 0x00 0x00
+
+# CHECK: diag %r14, %r15, 0
+0x83 0xef 0x00 0x00
+
+# CHECK: diag %r15, %r15, 0
+0x83 0xff 0x00 0x00
+
+# CHECK: diag %r0, %r0, 4095
+0x83 0x00 0x0f 0xff
+
+# CHECK: diag %r0, %r0, 1
+0x83 0x00 0x00 0x01
+
+# CHECK: diag %r0, %r0, 0(%r1)
+0x83 0x00 0x10 0x00
+
+# CHECK: diag %r0, %r0, 0(%r15)
+0x83 0x00 0xf0 0x00
+
+# CHECK: diag %r0, %r0, 4095(%r1)
+0x83 0x00 0x1f 0xff
+
+# CHECK: diag %r0, %r0, 4095(%r15)
+0x83 0x00 0xff 0xff
+
# CHECK: didbr %f0, %f0, %f0, 1
0xb3 0x5b 0x01 0x00
@@ -5992,6 +6247,30 @@
# CHECK: ecag %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0x4c
+# CHECK: ecctr %r0, %r0
+0xb2 0xe4 0x00 0x00
+
+# CHECK: ecctr %r0, %r15
+0xb2 0xe4 0x00 0x0f
+
+# CHECK: ecctr %r15, %r0
+0xb2 0xe4 0x00 0xf0
+
+# CHECK: ecctr %r7, %r8
+0xb2 0xe4 0x00 0x78
+
+# CHECK: ecpga %r0, %r0
+0xb2 0xed 0x00 0x00
+
+# CHECK: ecpga %r0, %r15
+0xb2 0xed 0x00 0x0f
+
+# CHECK: ecpga %r15, %r0
+0xb2 0xed 0x00 0xf0
+
+# CHECK: ecpga %r7, %r8
+0xb2 0xed 0x00 0x78
+
# CHECK: ectg 0, 0, %r0
0xc8 0x01 0x00 0x00 0x00 0x00
@@ -6118,6 +6397,36 @@
# CHECK: efpc %r15
0xb3 0x8c 0x00 0xf0
+# CHECK: epar %r0
+0xb2 0x26 0x00 0x00
+
+# CHECK: epar %r1
+0xb2 0x26 0x00 0x10
+
+# CHECK: epar %r15
+0xb2 0x26 0x00 0xf0
+
+# CHECK: epair %r0
+0xb9 0x9a 0x00 0x00
+
+# CHECK: epair %r1
+0xb9 0x9a 0x00 0x10
+
+# CHECK: epair %r15
+0xb9 0x9a 0x00 0xf0
+
+# CHECK: epctr %r0, %r0
+0xb2 0xe5 0x00 0x00
+
+# CHECK: epctr %r0, %r15
+0xb2 0xe5 0x00 0x0f
+
+# CHECK: epctr %r15, %r0
+0xb2 0xe5 0x00 0xf0
+
+# CHECK: epctr %r7, %r8
+0xb2 0xe5 0x00 0x78
+
# CHECK: epsw %r0, %r0
0xb9 0x8d 0x00 0x00
@@ -6130,6 +6439,48 @@
# CHECK: epsw %r6, %r8
0xb9 0x8d 0x00 0x68
+# CHECK: ereg %r0, %r0
+0xb2 0x49 0x00 0x00
+
+# CHECK: ereg %r0, %r15
+0xb2 0x49 0x00 0x0f
+
+# CHECK: ereg %r15, %r0
+0xb2 0x49 0x00 0xf0
+
+# CHECK: ereg %r7, %r8
+0xb2 0x49 0x00 0x78
+
+# CHECK: eregg %r0, %r0
+0xb9 0x0e 0x00 0x00
+
+# CHECK: eregg %r0, %r15
+0xb9 0x0e 0x00 0x0f
+
+# CHECK: eregg %r15, %r0
+0xb9 0x0e 0x00 0xf0
+
+# CHECK: eregg %r7, %r8
+0xb9 0x0e 0x00 0x78
+
+# CHECK: esar %r0
+0xb2 0x27 0x00 0x00
+
+# CHECK: esar %r1
+0xb2 0x27 0x00 0x10
+
+# CHECK: esar %r15
+0xb2 0x27 0x00 0xf0
+
+# CHECK: esair %r0
+0xb9 0x9b 0x00 0x00
+
+# CHECK: esair %r1
+0xb9 0x9b 0x00 0x10
+
+# CHECK: esair %r15
+0xb9 0x9b 0x00 0xf0
+
# CHECK: esdtr %f0, %f9
0xb3 0xe7 0x00 0x09
@@ -6142,6 +6493,27 @@
# CHECK: esdtr %f15, %f9
0xb3 0xe7 0x00 0xf9
+# CHECK: esea %r0
+0xb9 0x9d 0x00 0x00
+
+# CHECK: esea %r1
+0xb9 0x9d 0x00 0x10
+
+# CHECK: esea %r15
+0xb9 0x9d 0x00 0xf0
+
+# CHECK: esta %r0, %r0
+0xb2 0x4a 0x00 0x00
+
+# CHECK: esta %r0, %r15
+0xb2 0x4a 0x00 0x0f
+
+# CHECK: esta %r14, %r0
+0xb2 0x4a 0x00 0xe0
+
+# CHECK: esta %r6, %r8
+0xb2 0x4a 0x00 0x68
+
# CHECK: esxtr %f0, %f8
0xb3 0xef 0x00 0x08
@@ -6391,6 +6763,18 @@
# CHECK: her %f15, %f0
0x34 0xf0
+# CHECK: hsch
+0xb2 0x31 0x00 0x00
+
+# CHECK: iac %r0
+0xb2 0x24 0x00 0x00
+
+# CHECK: iac %r1
+0xb2 0x24 0x00 0x10
+
+# CHECK: iac %r15
+0xb2 0x24 0x00 0xf0
+
# CHECK: ic %r0, 0
0x43 0x00 0x00 0x00
@@ -6523,6 +6907,24 @@
# CHECK: icy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x73
+# CHECK: idte %r0, %r0, %r0
+0xb9 0x8e 0x00 0x00
+
+# CHECK: idte %r0, %r0, %r15
+0xb9 0x8e 0x00 0x0f
+
+# CHECK: idte %r0, %r15, %r0
+0xb9 0x8e 0xf0 0x00
+
+# CHECK: idte %r15, %r0, %r0
+0xb9 0x8e 0x00 0xf0
+
+# CHECK: idte %r0, %r0, %r0, 15
+0xb9 0x8e 0x0f 0x00
+
+# CHECK: idte %r4, %r5, %r6, 7
+0xb9 0x8e 0x57 0x46
+
# CHECK: iedtr %f0, %f0, %f0
0xb3 0xf6 0x00 0x00
@@ -6625,6 +7027,9 @@
# CHECK: iill %r15, 0
0xa5 0xf3 0x00 0x00
+# CHECK: ipk
+0xb2 0x0b 0x00 0x00
+
# CHECK: ipm %r0
0xb2 0x22 0x00 0x00
@@ -6634,6 +7039,48 @@
# CHECK: ipm %r15
0xb2 0x22 0x00 0xf0
+# CHECK: ipte %r0, %r0
+0xb2 0x21 0x00 0x00
+
+# CHECK: ipte %r0, %r15
+0xb2 0x21 0x00 0x0f
+
+# CHECK: ipte %r15, %r0
+0xb2 0x21 0x00 0xf0
+
+# CHECK: ipte %r0, %r0, %r15
+0xb2 0x21 0xf0 0x00
+
+# CHECK: ipte %r0, %r0, %r0, 15
+0xb2 0x21 0x0f 0x00
+
+# CHECK: ipte %r7, %r8, %r9, 10
+0xb2 0x21 0x9a 0x78
+
+# CHECK: iske %r0, %r0
+0xb2 0x29 0x00 0x00
+
+# CHECK: iske %r0, %r15
+0xb2 0x29 0x00 0x0f
+
+# CHECK: iske %r15, %r0
+0xb2 0x29 0x00 0xf0
+
+# CHECK: iske %r7, %r8
+0xb2 0x29 0x00 0x78
+
+# CHECK: ivsk %r0, %r0
+0xb2 0x23 0x00 0x00
+
+# CHECK: ivsk %r0, %r15
+0xb2 0x23 0x00 0x0f
+
+# CHECK: ivsk %r15, %r0
+0xb2 0x23 0x00 0xf0
+
+# CHECK: ivsk %r7, %r8
+0xb2 0x23 0x00 0x78
+
# CHECK: kdb %f0, 0
0xed 0x00 0x00 0x00 0x00 0x18
@@ -7258,6 +7705,36 @@
# CHECK: laog %r15, %r0, 0
0xeb 0xf0 0x00 0x00 0x00 0xe6
+# CHECK: lasp 0, 0
+0xe5 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: lasp 0(%r1), 0(%r2)
+0xe5 0x00 0x10 0x00 0x20 0x00
+
+# CHECK: lasp 160(%r1), 320(%r15)
+0xe5 0x00 0x10 0xa0 0xf1 0x40
+
+# CHECK: lasp 0(%r1), 4095
+0xe5 0x00 0x10 0x00 0x0f 0xff
+
+# CHECK: lasp 0(%r1), 4095(%r2)
+0xe5 0x00 0x10 0x00 0x2f 0xff
+
+# CHECK: lasp 0(%r1), 4095(%r15)
+0xe5 0x00 0x10 0x00 0xff 0xff
+
+# CHECK: lasp 0(%r1), 0
+0xe5 0x00 0x10 0x00 0x00 0x00
+
+# CHECK: lasp 0(%r15), 0
+0xe5 0x00 0xf0 0x00 0x00 0x00
+
+# CHECK: lasp 4095(%r1), 0
+0xe5 0x00 0x1f 0xff 0x00 0x00
+
+# CHECK: lasp 4095(%r15), 0
+0xe5 0x00 0xff 0xff 0x00 0x00
+
# CHECK: lat %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x9f
@@ -7453,6 +7930,24 @@
# CHECK: lbr %r15, %r0
0xb9 0x26 0x00 0xf0
+# CHECK: lcctl 0
+0xb2 0x84 0x00 0x00
+
+# CHECK: lcctl 0(%r1)
+0xb2 0x84 0x10 0x00
+
+# CHECK: lcctl 0(%r15)
+0xb2 0x84 0xf0 0x00
+
+# CHECK: lcctl 4095
+0xb2 0x84 0x0f 0xff
+
+# CHECK: lcctl 4095(%r1)
+0xb2 0x84 0x1f 0xff
+
+# CHECK: lcctl 4095(%r15)
+0xb2 0x84 0xff 0xff
+
# CHECK: lcdbr %f0, %f9
0xb3 0x13 0x00 0x09
@@ -7536,6 +8031,75 @@
# CHECK: lcr %r7, %r8
0x13 0x78
+# CHECK: lctl %c0, %c0, 0
+0xb7 0x00 0x00 0x00
+
+# CHECK: lctl %c0, %c15, 0
+0xb7 0x0f 0x00 0x00
+
+# CHECK: lctl %c14, %c15, 0
+0xb7 0xef 0x00 0x00
+
+# CHECK: lctl %c15, %c15, 0
+0xb7 0xff 0x00 0x00
+
+# CHECK: lctl %c0, %c0, 4095
+0xb7 0x00 0x0f 0xff
+
+# CHECK: lctl %c0, %c0, 1
+0xb7 0x00 0x00 0x01
+
+# CHECK: lctl %c0, %c0, 0(%r1)
+0xb7 0x00 0x10 0x00
+
+# CHECK: lctl %c0, %c0, 0(%r15)
+0xb7 0x00 0xf0 0x00
+
+# CHECK: lctl %c0, %c0, 4095(%r1)
+0xb7 0x00 0x1f 0xff
+
+# CHECK: lctl %c0, %c0, 4095(%r15)
+0xb7 0x00 0xff 0xff
+
+# CHECK: lctlg %c0, %c0, 0
+0xeb 0x00 0x00 0x00 0x00 0x2f
+
+# CHECK: lctlg %c0, %c15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x2f
+
+# CHECK: lctlg %c14, %c15, 0
+0xeb 0xef 0x00 0x00 0x00 0x2f
+
+# CHECK: lctlg %c15, %c15, 0
+0xeb 0xff 0x00 0x00 0x00 0x2f
+
+# CHECK: lctlg %c0, %c0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x2f
+
+# CHECK: lctlg %c0, %c0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x2f
+
+# CHECK: lctlg %c0, %c0, 0
+0xeb 0x00 0x00 0x00 0x00 0x2f
+
+# CHECK: lctlg %c0, %c0, 1
+0xeb 0x00 0x00 0x01 0x00 0x2f
+
+# CHECK: lctlg %c0, %c0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x2f
+
+# CHECK: lctlg %c0, %c0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x2f
+
+# CHECK: lctlg %c0, %c0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x2f
+
+# CHECK: lctlg %c0, %c0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x2f
+
+# CHECK: lctlg %c0, %c0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x2f
+
# CHECK: lcxbr %f0, %f8
0xb3 0x43 0x00 0x08
@@ -9282,6 +9846,24 @@
# CHECK: locgr %r11, %r3, 15
0xb9 0xe2 0xf0 0xb3
+# CHECK: lpctl 0
+0xb2 0x85 0x00 0x00
+
+# CHECK: lpctl 0(%r1)
+0xb2 0x85 0x10 0x00
+
+# CHECK: lpctl 0(%r15)
+0xb2 0x85 0xf0 0x00
+
+# CHECK: lpctl 4095
+0xb2 0x85 0x0f 0xff
+
+# CHECK: lpctl 4095(%r1)
+0xb2 0x85 0x1f 0xff
+
+# CHECK: lpctl 4095(%r15)
+0xb2 0x85 0xff 0xff
+
# CHECK: lpd %r0, 0, 0
0xc8 0x04 0x00 0x00 0x00 0x00
@@ -9396,6 +9978,24 @@
# CHECK: lpgr %r7, %r8
0xb9 0x00 0x00 0x78
+# CHECK: lpp 0
+0xb2 0x80 0x00 0x00
+
+# CHECK: lpp 0(%r1)
+0xb2 0x80 0x10 0x00
+
+# CHECK: lpp 0(%r15)
+0xb2 0x80 0xf0 0x00
+
+# CHECK: lpp 4095
+0xb2 0x80 0x0f 0xff
+
+# CHECK: lpp 4095(%r1)
+0xb2 0x80 0x1f 0xff
+
+# CHECK: lpp 4095(%r15)
+0xb2 0x80 0xff 0xff
+
# CHECK: lpq %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x8f
@@ -9438,6 +10038,60 @@
# CHECK: lpr %r7, %r8
0x10 0x78
+# CHECK: lpsw 0
+0x82 0x00 0x00 0x00
+
+# CHECK: lpsw 0(%r1)
+0x82 0x00 0x10 0x00
+
+# CHECK: lpsw 0(%r15)
+0x82 0x00 0xf0 0x00
+
+# CHECK: lpsw 4095
+0x82 0x00 0x0f 0xff
+
+# CHECK: lpsw 4095(%r1)
+0x82 0x00 0x1f 0xff
+
+# CHECK: lpsw 4095(%r15)
+0x82 0x00 0xff 0xff
+
+# CHECK: lpswe 0
+0xb2 0xb2 0x00 0x00
+
+# CHECK: lpswe 0(%r1)
+0xb2 0xb2 0x10 0x00
+
+# CHECK: lpswe 0(%r15)
+0xb2 0xb2 0xf0 0x00
+
+# CHECK: lpswe 4095
+0xb2 0xb2 0x0f 0xff
+
+# CHECK: lpswe 4095(%r1)
+0xb2 0xb2 0x1f 0xff
+
+# CHECK: lpswe 4095(%r15)
+0xb2 0xb2 0xff 0xff
+
+# CHECK: lptea %r0, %r0, %r0, 0
+0xb9 0xaa 0x00 0x00
+
+# CHECK: lptea %r0, %r0, %r0, 15
+0xb9 0xaa 0x0f 0x00
+
+# CHECK: lptea %r0, %r0, %r15, 0
+0xb9 0xaa 0x00 0x0f
+
+# CHECK: lptea %r0, %r15, %r0, 0
+0xb9 0xaa 0xf0 0x00
+
+# CHECK: lptea %r4, %r5, %r6, 7
+0xb9 0xaa 0x57 0x46
+
+# CHECK: lptea %r15, %r0, %r0, 0
+0xb9 0xaa 0x00 0xf0
+
# CHECK: lpxbr %f0, %f8
0xb3 0x40 0x00 0x08
@@ -9474,6 +10128,87 @@
# CHECK: lr %r15, %r9
0x18 0xf9
+# CHECK: lra %r0, 0
+0xb1 0x00 0x00 0x00
+
+# CHECK: lra %r0, 4095
+0xb1 0x00 0x0f 0xff
+
+# CHECK: lra %r0, 0(%r1)
+0xb1 0x00 0x10 0x00
+
+# CHECK: lra %r0, 0(%r15)
+0xb1 0x00 0xf0 0x00
+
+# CHECK: lra %r0, 4095(%r1,%r15)
+0xb1 0x01 0xff 0xff
+
+# CHECK: lra %r0, 4095(%r15,%r1)
+0xb1 0x0f 0x1f 0xff
+
+# CHECK: lra %r15, 0
+0xb1 0xf0 0x00 0x00
+
+# CHECK: lrag %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x03
+
+# CHECK: lrag %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x03
+
+# CHECK: lrag %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x03
+
+# CHECK: lrag %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x03
+
+# CHECK: lrag %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x03
+
+# CHECK: lrag %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x03
+
+# CHECK: lrag %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x03
+
+# CHECK: lrag %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x03
+
+# CHECK: lrag %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x03
+
+# CHECK: lrag %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x03
+
+# CHECK: lray %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x13
+
+# CHECK: lray %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x13
+
+# CHECK: lray %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x13
+
+# CHECK: lray %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x13
+
+# CHECK: lray %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x13
+
+# CHECK: lray %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x13
+
+# CHECK: lray %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x13
+
+# CHECK: lray %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x13
+
+# CHECK: lray %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x13
+
+# CHECK: lray %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x13
+
# CHECK: lrv %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x1e
@@ -9594,6 +10329,24 @@
# CHECK: lrvr %r15, %r15
0xb9 0x1f 0x00 0xff
+# CHECK: lsctl 0
+0xb2 0x87 0x00 0x00
+
+# CHECK: lsctl 0(%r1)
+0xb2 0x87 0x10 0x00
+
+# CHECK: lsctl 0(%r15)
+0xb2 0x87 0xf0 0x00
+
+# CHECK: lsctl 4095
+0xb2 0x87 0x0f 0xff
+
+# CHECK: lsctl 4095(%r1)
+0xb2 0x87 0x1f 0xff
+
+# CHECK: lsctl 4095(%r15)
+0xb2 0x87 0xff 0xff
+
# CHECK: lt %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x12
@@ -9816,6 +10569,30 @@
# CHECK: ltxtr %f13, %f9
0xb3 0xde 0x00 0xd9
+# CHECK: lura %r0, %r0
+0xb2 0x4b 0x00 0x00
+
+# CHECK: lura %r0, %r15
+0xb2 0x4b 0x00 0x0f
+
+# CHECK: lura %r15, %r0
+0xb2 0x4b 0x00 0xf0
+
+# CHECK: lura %r7, %r8
+0xb2 0x4b 0x00 0x78
+
+# CHECK: lurag %r0, %r0
+0xb9 0x05 0x00 0x00
+
+# CHECK: lurag %r0, %r15
+0xb9 0x05 0x00 0x0f
+
+# CHECK: lurag %r15, %r0
+0xb9 0x05 0x00 0xf0
+
+# CHECK: lurag %r7, %r8
+0xb9 0x05 0x00 0x78
+
# CHECK: lxd %f0, 4095
0xed 0x00 0x0f 0xff 0x00 0x25
@@ -10887,6 +11664,24 @@
# CHECK: ms %r15, 0
0x71 0xf0 0x00 0x00
+# CHECK: msch 0
+0xb2 0x32 0x00 0x00
+
+# CHECK: msch 0(%r1)
+0xb2 0x32 0x10 0x00
+
+# CHECK: msch 0(%r15)
+0xb2 0x32 0xf0 0x00
+
+# CHECK: msch 4095
+0xb2 0x32 0x0f 0xff
+
+# CHECK: msch 4095(%r1)
+0xb2 0x32 0x1f 0xff
+
+# CHECK: msch 4095(%r15)
+0xb2 0x32 0xff 0xff
+
# CHECK: msd %f0, %f0, 0
0xed 0x00 0x00 0x00 0x00 0x3f
@@ -11199,6 +11994,15 @@
# CHECK: msr %r7, %r8
0xb2 0x52 0x00 0x78
+# CHECK: msta %r0
+0xb2 0x47 0x00 0x00
+
+# CHECK: msta %r2
+0xb2 0x47 0x00 0x20
+
+# CHECK: msta %r14
+0xb2 0x47 0x00 0xe0
+
# CHECK: msy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x51
@@ -11265,6 +12069,36 @@
# CHECK: mvc 0(256,%r15), 0
0xd2 0xff 0xf0 0x00 0x00 0x00
+# CHECK: mvcdk 0, 0
+0xe5 0x0f 0x00 0x00 0x00 0x00
+
+# CHECK: mvcdk 0(%r1), 0(%r2)
+0xe5 0x0f 0x10 0x00 0x20 0x00
+
+# CHECK: mvcdk 160(%r1), 320(%r15)
+0xe5 0x0f 0x10 0xa0 0xf1 0x40
+
+# CHECK: mvcdk 0(%r1), 4095
+0xe5 0x0f 0x10 0x00 0x0f 0xff
+
+# CHECK: mvcdk 0(%r1), 4095(%r2)
+0xe5 0x0f 0x10 0x00 0x2f 0xff
+
+# CHECK: mvcdk 0(%r1), 4095(%r15)
+0xe5 0x0f 0x10 0x00 0xff 0xff
+
+# CHECK: mvcdk 0(%r1), 0
+0xe5 0x0f 0x10 0x00 0x00 0x00
+
+# CHECK: mvcdk 0(%r15), 0
+0xe5 0x0f 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcdk 4095(%r1), 0
+0xe5 0x0f 0x1f 0xff 0x00 0x00
+
+# CHECK: mvcdk 4095(%r15), 0
+0xe5 0x0f 0xff 0xff 0x00 0x00
+
# CHECK: mvcin 0(1), 0
0xe8 0x00 0x00 0x00 0x00 0x00
@@ -11385,6 +12219,132 @@
# CHECK: mvclu %r14, %r0, 0
0xeb 0xe0 0x00 0x00 0x00 0x8e
+# CHECK: mvcos 0, 0, %r0
+0xc8 0x00 0x00 0x00 0x00 0x00
+
+# CHECK: mvcos 0(%r1), 0(%r15), %r2
+0xc8 0x20 0x10 0x00 0xf0 0x00
+
+# CHECK: mvcos 1(%r1), 0(%r15), %r2
+0xc8 0x20 0x10 0x01 0xf0 0x00
+
+# CHECK: mvcos 4095(%r1), 0(%r15), %r2
+0xc8 0x20 0x1f 0xff 0xf0 0x00
+
+# CHECK: mvcos 0(%r1), 1(%r15), %r2
+0xc8 0x20 0x10 0x00 0xf0 0x01
+
+# CHECK: mvcos 0(%r1), 4095(%r15), %r2
+0xc8 0x20 0x10 0x00 0xff 0xff
+
+# CHECK: mvcp 0(%r0), 0, %r3
+0xda 0x03 0x00 0x00 0x00 0x00
+
+# CHECK: mvcp 0(%r1), 0, %r3
+0xda 0x13 0x00 0x00 0x00 0x00
+
+# CHECK: mvcp 0(%r1), 0(%r1), %r3
+0xda 0x13 0x00 0x00 0x10 0x00
+
+# CHECK: mvcp 0(%r1), 0(%r15), %r3
+0xda 0x13 0x00 0x00 0xf0 0x00
+
+# CHECK: mvcp 0(%r1), 4095, %r3
+0xda 0x13 0x00 0x00 0x0f 0xff
+
+# CHECK: mvcp 0(%r1), 4095(%r1), %r3
+0xda 0x13 0x00 0x00 0x1f 0xff
+
+# CHECK: mvcp 0(%r1), 4095(%r15), %r3
+0xda 0x13 0x00 0x00 0xff 0xff
+
+# CHECK: mvcp 0(%r2,%r1), 0, %r3
+0xda 0x23 0x10 0x00 0x00 0x00
+
+# CHECK: mvcp 0(%r2,%r15), 0, %r3
+0xda 0x23 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcp 4095(%r2,%r1), 0, %r3
+0xda 0x23 0x1f 0xff 0x00 0x00
+
+# CHECK: mvcp 4095(%r2,%r15), 0, %r3
+0xda 0x23 0xff 0xff 0x00 0x00
+
+# CHECK: mvcp 0(%r2,%r1), 0, %r3
+0xda 0x23 0x10 0x00 0x00 0x00
+
+# CHECK: mvcp 0(%r2,%r15), 0, %r3
+0xda 0x23 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcs 0(%r0), 0, %r3
+0xdb 0x03 0x00 0x00 0x00 0x00
+
+# CHECK: mvcs 0(%r1), 0, %r3
+0xdb 0x13 0x00 0x00 0x00 0x00
+
+# CHECK: mvcs 0(%r1), 0(%r1), %r3
+0xdb 0x13 0x00 0x00 0x10 0x00
+
+# CHECK: mvcs 0(%r1), 0(%r15), %r3
+0xdb 0x13 0x00 0x00 0xf0 0x00
+
+# CHECK: mvcs 0(%r1), 4095, %r3
+0xdb 0x13 0x00 0x00 0x0f 0xff
+
+# CHECK: mvcs 0(%r1), 4095(%r1), %r3
+0xdb 0x13 0x00 0x00 0x1f 0xff
+
+# CHECK: mvcs 0(%r1), 4095(%r15), %r3
+0xdb 0x13 0x00 0x00 0xff 0xff
+
+# CHECK: mvcs 0(%r2,%r1), 0, %r3
+0xdb 0x23 0x10 0x00 0x00 0x00
+
+# CHECK: mvcs 0(%r2,%r15), 0, %r3
+0xdb 0x23 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcs 4095(%r2,%r1), 0, %r3
+0xdb 0x23 0x1f 0xff 0x00 0x00
+
+# CHECK: mvcs 4095(%r2,%r15), 0, %r3
+0xdb 0x23 0xff 0xff 0x00 0x00
+
+# CHECK: mvcs 0(%r2,%r1), 0, %r3
+0xdb 0x23 0x10 0x00 0x00 0x00
+
+# CHECK: mvcs 0(%r2,%r15), 0, %r3
+0xdb 0x23 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcsk 0, 0
+0xe5 0x0e 0x00 0x00 0x00 0x00
+
+# CHECK: mvcsk 0(%r1), 0(%r2)
+0xe5 0x0e 0x10 0x00 0x20 0x00
+
+# CHECK: mvcsk 160(%r1), 320(%r15)
+0xe5 0x0e 0x10 0xa0 0xf1 0x40
+
+# CHECK: mvcsk 0(%r1), 4095
+0xe5 0x0e 0x10 0x00 0x0f 0xff
+
+# CHECK: mvcsk 0(%r1), 4095(%r2)
+0xe5 0x0e 0x10 0x00 0x2f 0xff
+
+# CHECK: mvcsk 0(%r1), 4095(%r15)
+0xe5 0x0e 0x10 0x00 0xff 0xff
+
+# CHECK: mvcsk 0(%r1), 0
+0xe5 0x0e 0x10 0x00 0x00 0x00
+
+# CHECK: mvcsk 0(%r15), 0
+0xe5 0x0e 0xf0 0x00 0x00 0x00
+
+# CHECK: mvcsk 4095(%r1), 0
+0xe5 0x0e 0x1f 0xff 0x00 0x00
+
+# CHECK: mvcsk 4095(%r15), 0
+0xe5 0x0e 0xff 0xff 0x00 0x00
+
# CHECK: mvghi 0, 0
0xe5 0x48 0x00 0x00 0x00 0x00
@@ -11613,6 +12573,18 @@
# CHECK: mvo 0(1), 0(16,%r15)
0xf1 0x0f 0x00 0x00 0xf0 0x00
+# CHECK: mvpg %r0, %r0
+0xb2 0x54 0x00 0x00
+
+# CHECK: mvpg %r0, %r15
+0xb2 0x54 0x00 0x0f
+
+# CHECK: mvpg %r15, %r0
+0xb2 0x54 0x00 0xf0
+
+# CHECK: mvpg %r7, %r8
+0xb2 0x54 0x00 0x78
+
# CHECK: mvst %r0, %r0
0xb2 0x55 0x00 0x00
@@ -12543,9 +13515,33 @@
# CHECK: pack 0(1), 0(16,%r15)
0xf2 0x0f 0x00 0x00 0xf0 0x00
+# CHECK: palb
+0xb2 0x48 0x00 0x00
+
+# CHECK: pc 0
+0xb2 0x18 0x00 0x00
+
+# CHECK: pc 0(%r1)
+0xb2 0x18 0x10 0x00
+
+# CHECK: pc 0(%r15)
+0xb2 0x18 0xf0 0x00
+
+# CHECK: pc 4095
+0xb2 0x18 0x0f 0xff
+
+# CHECK: pc 4095(%r1)
+0xb2 0x18 0x1f 0xff
+
+# CHECK: pc 4095(%r15)
+0xb2 0x18 0xff 0xff
+
# CHECK: pcc
0xb9 0x2c 0x00 0x00
+# CHECK: pckmo
+0xb9 0x28 0x00 0x00
+
# CHECK: pfd 0, -524288
0xe3 0x00 0x00 0x00 0x80 0x36
@@ -12576,9 +13572,54 @@
# CHECK: pfd 15, 0
0xe3 0xf0 0x00 0x00 0x00 0x36
+# CHECK: pfmf %r0, %r0
+0xb9 0xaf 0x00 0x00
+
+# CHECK: pfmf %r0, %r15
+0xb9 0xaf 0x00 0x0f
+
+# CHECK: pfmf %r15, %r0
+0xb9 0xaf 0x00 0xf0
+
+# CHECK: pfmf %r7, %r8
+0xb9 0xaf 0x00 0x78
+
+# CHECK: pfmf %r15, %r15
+0xb9 0xaf 0x00 0xff
+
# CHECK: pfpo
0x01 0x0a
+# CHECK: pgin %r0, %r0
+0xb2 0x2e 0x00 0x00
+
+# CHECK: pgin %r0, %r15
+0xb2 0x2e 0x00 0x0f
+
+# CHECK: pgin %r15, %r0
+0xb2 0x2e 0x00 0xf0
+
+# CHECK: pgin %r7, %r8
+0xb2 0x2e 0x00 0x78
+
+# CHECK: pgin %r15, %r15
+0xb2 0x2e 0x00 0xff
+
+# CHECK: pgout %r0, %r0
+0xb2 0x2f 0x00 0x00
+
+# CHECK: pgout %r0, %r15
+0xb2 0x2f 0x00 0x0f
+
+# CHECK: pgout %r15, %r0
+0xb2 0x2f 0x00 0xf0
+
+# CHECK: pgout %r7, %r8
+0xb2 0x2f 0x00 0x78
+
+# CHECK: pgout %r15, %r15
+0xb2 0x2f 0x00 0xff
+
# CHECK: pka 0, 0(1)
0xe9 0x00 0x00 0x00 0x00 0x00
@@ -12702,6 +13743,45 @@
# CHECK: pr
0x01 0x01
+# CHECK: pt %r0, %r0
+0xb2 0x28 0x00 0x00
+
+# CHECK: pt %r0, %r15
+0xb2 0x28 0x00 0x0f
+
+# CHECK: pt %r15, %r0
+0xb2 0x28 0x00 0xf0
+
+# CHECK: pt %r7, %r8
+0xb2 0x28 0x00 0x78
+
+# CHECK: ptf %r0
+0xb9 0xa2 0x00 0x00
+
+# CHECK: ptf %r1
+0xb9 0xa2 0x00 0x10
+
+# CHECK: ptf %r15
+0xb9 0xa2 0x00 0xf0
+
+# CHECK: ptff
+0x01 0x04
+
+# CHECK: pti %r0, %r0
+0xb9 0x9e 0x00 0x00
+
+# CHECK: pti %r0, %r15
+0xb9 0x9e 0x00 0x0f
+
+# CHECK: pti %r15, %r0
+0xb9 0x9e 0x00 0xf0
+
+# CHECK: pti %r7, %r8
+0xb9 0x9e 0x00 0x78
+
+# CHECK: ptlb
+0xb2 0x0d 0x00 0x00
+
# CHECK: qadtr %f0, %f0, %f0, 0
0xb3 0xf5 0x00 0x00
@@ -12738,6 +13818,45 @@
# CHECK: qaxtr %f13, %f0, %f0, 0
0xb3 0xfd 0x00 0xd0
+# CHECK: qctri 0
+0xb2 0x8e 0x00 0x00
+
+# CHECK: qctri 0(%r1)
+0xb2 0x8e 0x10 0x00
+
+# CHECK: qctri 0(%r15)
+0xb2 0x8e 0xf0 0x00
+
+# CHECK: qctri 4095
+0xb2 0x8e 0x0f 0xff
+
+# CHECK: qctri 4095(%r1)
+0xb2 0x8e 0x1f 0xff
+
+# CHECK: qctri 4095(%r15)
+0xb2 0x8e 0xff 0xff
+
+# CHECK: qsi 0
+0xb2 0x86 0x00 0x00
+
+# CHECK: qsi 0(%r1)
+0xb2 0x86 0x10 0x00
+
+# CHECK: qsi 0(%r15)
+0xb2 0x86 0xf0 0x00
+
+# CHECK: qsi 4095
+0xb2 0x86 0x0f 0xff
+
+# CHECK: qsi 4095(%r1)
+0xb2 0x86 0x1f 0xff
+
+# CHECK: qsi 4095(%r15)
+0xb2 0x86 0xff 0xff
+
+# CHECK: rchp
+0xb2 0x3b 0x00 0x00
+
# CHECK: risbg %r0, %r0, 0, 0, 0
0xec 0x00 0x00 0x00 0x00 0x55
@@ -12936,6 +14055,54 @@
# CHECK: rosbg %r4, %r5, 6, 7, 8
0xec 0x45 0x06 0x07 0x08 0x56
+# CHECK: rp 0
+0xb2 0x77 0x00 0x00
+
+# CHECK: rp 0(%r1)
+0xb2 0x77 0x10 0x00
+
+# CHECK: rp 0(%r15)
+0xb2 0x77 0xf0 0x00
+
+# CHECK: rp 4095
+0xb2 0x77 0x0f 0xff
+
+# CHECK: rp 4095(%r1)
+0xb2 0x77 0x1f 0xff
+
+# CHECK: rp 4095(%r15)
+0xb2 0x77 0xff 0xff
+
+# CHECK: rrbe %r0, %r0
+0xb2 0x2a 0x00 0x00
+
+# CHECK: rrbe %r0, %r15
+0xb2 0x2a 0x00 0x0f
+
+# CHECK: rrbe %r15, %r0
+0xb2 0x2a 0x00 0xf0
+
+# CHECK: rrbe %r7, %r8
+0xb2 0x2a 0x00 0x78
+
+# CHECK: rrbe %r15, %r15
+0xb2 0x2a 0x00 0xff
+
+# CHECK: rrbm %r0, %r0
+0xb9 0xae 0x00 0x00
+
+# CHECK: rrbm %r0, %r15
+0xb9 0xae 0x00 0x0f
+
+# CHECK: rrbm %r15, %r0
+0xb9 0xae 0x00 0xf0
+
+# CHECK: rrbm %r7, %r8
+0xb9 0xae 0x00 0x78
+
+# CHECK: rrbm %r15, %r15
+0xb9 0xae 0x00 0xff
+
# CHECK: rrdtr %f0, %f0, %f0, 0
0xb3 0xf7 0x00 0x00
@@ -12972,6 +14139,9 @@
# CHECK: rrxtr %f13, %f0, %f0, 0
0xb3 0xff 0x00 0xd0
+# CHECK: rsch
+0xb2 0x38 0x00 0x00
+
# CHECK: rxsbg %r0, %r0, 0, 0, 0
0xec 0x00 0x00 0x00 0x00 0x57
@@ -13014,6 +14184,45 @@
# CHECK: s %r15, 0
0x5b 0xf0 0x00 0x00
+# CHECK: sac 0
+0xb2 0x19 0x00 0x00
+
+# CHECK: sac 0(%r1)
+0xb2 0x19 0x10 0x00
+
+# CHECK: sac 0(%r15)
+0xb2 0x19 0xf0 0x00
+
+# CHECK: sac 4095
+0xb2 0x19 0x0f 0xff
+
+# CHECK: sac 4095(%r1)
+0xb2 0x19 0x1f 0xff
+
+# CHECK: sac 4095(%r15)
+0xb2 0x19 0xff 0xff
+
+# CHECK: sacf 0
+0xb2 0x79 0x00 0x00
+
+# CHECK: sacf 0(%r1)
+0xb2 0x79 0x10 0x00
+
+# CHECK: sacf 0(%r15)
+0xb2 0x79 0xf0 0x00
+
+# CHECK: sacf 4095
+0xb2 0x79 0x0f 0xff
+
+# CHECK: sacf 4095(%r1)
+0xb2 0x79 0x1f 0xff
+
+# CHECK: sacf 4095(%r15)
+0xb2 0x79 0xff 0xff
+
+# CHECK: sal
+0xb2 0x37 0x00 0x00
+
# CHECK: sam24
0x01 0x0c
@@ -13038,6 +14247,60 @@
# CHECK: sar %a15, %r15
0xb2 0x4e 0x00 0xff
+# CHECK: scctr %r0, %r0
+0xb2 0xe0 0x00 0x00
+
+# CHECK: scctr %r0, %r15
+0xb2 0xe0 0x00 0x0f
+
+# CHECK: scctr %r15, %r0
+0xb2 0xe0 0x00 0xf0
+
+# CHECK: scctr %r7, %r8
+0xb2 0xe0 0x00 0x78
+
+# CHECK: schm
+0xb2 0x3c 0x00 0x00
+
+# CHECK: sck 0
+0xb2 0x04 0x00 0x00
+
+# CHECK: sck 0(%r1)
+0xb2 0x04 0x10 0x00
+
+# CHECK: sck 0(%r15)
+0xb2 0x04 0xf0 0x00
+
+# CHECK: sck 4095
+0xb2 0x04 0x0f 0xff
+
+# CHECK: sck 4095(%r1)
+0xb2 0x04 0x1f 0xff
+
+# CHECK: sck 4095(%r15)
+0xb2 0x04 0xff 0xff
+
+# CHECK: sckc 0
+0xb2 0x06 0x00 0x00
+
+# CHECK: sckc 0(%r1)
+0xb2 0x06 0x10 0x00
+
+# CHECK: sckc 0(%r15)
+0xb2 0x06 0xf0 0x00
+
+# CHECK: sckc 4095
+0xb2 0x06 0x0f 0xff
+
+# CHECK: sckc 4095(%r1)
+0xb2 0x06 0x1f 0xff
+
+# CHECK: sckc 4095(%r15)
+0xb2 0x06 0xff 0xff
+
+# CHECK: sckpf
+0x01 0x07
+
# CHECK: sd %f0, 0
0x6b 0x00 0x00 0x00
@@ -13332,6 +14595,36 @@
# CHECK: sh %r15, 0
0x4b 0xf0 0x00 0x00
+# CHECK: shhhr %r0, %r0, %r0
+0xb9 0xc9 0x00 0x00
+
+# CHECK: shhhr %r0, %r0, %r15
+0xb9 0xc9 0xf0 0x00
+
+# CHECK: shhhr %r0, %r15, %r0
+0xb9 0xc9 0x00 0x0f
+
+# CHECK: shhhr %r15, %r0, %r0
+0xb9 0xc9 0x00 0xf0
+
+# CHECK: shhhr %r7, %r8, %r9
+0xb9 0xc9 0x90 0x78
+
+# CHECK: shhlr %r0, %r0, %r0
+0xb9 0xd9 0x00 0x00
+
+# CHECK: shhlr %r0, %r0, %r15
+0xb9 0xd9 0xf0 0x00
+
+# CHECK: shhlr %r0, %r15, %r0
+0xb9 0xd9 0x00 0x0f
+
+# CHECK: shhlr %r15, %r0, %r0
+0xb9 0xd9 0x00 0xf0
+
+# CHECK: shhlr %r7, %r8, %r9
+0xb9 0xd9 0x90 0x78
+
# CHECK: shy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x7b
@@ -13362,6 +14655,72 @@
# CHECK: shy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x7b
+# CHECK: sie 0
+0xb2 0x14 0x00 0x00
+
+# CHECK: sie 0(%r1)
+0xb2 0x14 0x10 0x00
+
+# CHECK: sie 0(%r15)
+0xb2 0x14 0xf0 0x00
+
+# CHECK: sie 4095
+0xb2 0x14 0x0f 0xff
+
+# CHECK: sie 4095(%r1)
+0xb2 0x14 0x1f 0xff
+
+# CHECK: sie 4095(%r15)
+0xb2 0x14 0xff 0xff
+
+# CHECK: siga 0
+0xb2 0x74 0x00 0x00
+
+# CHECK: siga 0(%r1)
+0xb2 0x74 0x10 0x00
+
+# CHECK: siga 0(%r15)
+0xb2 0x74 0xf0 0x00
+
+# CHECK: siga 4095
+0xb2 0x74 0x0f 0xff
+
+# CHECK: siga 4095(%r1)
+0xb2 0x74 0x1f 0xff
+
+# CHECK: siga 4095(%r15)
+0xb2 0x74 0xff 0xff
+
+# CHECK: sigp %r0, %r0, 0
+0xae 0x00 0x00 0x00
+
+# CHECK: sigp %r0, %r15, 0
+0xae 0x0f 0x00 0x00
+
+# CHECK: sigp %r14, %r15, 0
+0xae 0xef 0x00 0x00
+
+# CHECK: sigp %r15, %r15, 0
+0xae 0xff 0x00 0x00
+
+# CHECK: sigp %r0, %r0, 4095
+0xae 0x00 0x0f 0xff
+
+# CHECK: sigp %r0, %r0, 1
+0xae 0x00 0x00 0x01
+
+# CHECK: sigp %r0, %r0, 0(%r1)
+0xae 0x00 0x10 0x00
+
+# CHECK: sigp %r0, %r0, 0(%r15)
+0xae 0x00 0xf0 0x00
+
+# CHECK: sigp %r0, %r0, 4095(%r1)
+0xae 0x00 0x1f 0xff
+
+# CHECK: sigp %r0, %r0, 4095(%r15)
+0xae 0x00 0xff 0xff
+
# CHECK: sl %r0, 0
0x5f 0x00 0x00 0x00
@@ -13746,6 +15105,36 @@
# CHECK: slgrk %r2, %r3, %r4
0xb9 0xeb 0x40 0x23
+# CHECK: slhhhr %r0, %r0, %r0
+0xb9 0xcb 0x00 0x00
+
+# CHECK: slhhhr %r0, %r0, %r15
+0xb9 0xcb 0xf0 0x00
+
+# CHECK: slhhhr %r0, %r15, %r0
+0xb9 0xcb 0x00 0x0f
+
+# CHECK: slhhhr %r15, %r0, %r0
+0xb9 0xcb 0x00 0xf0
+
+# CHECK: slhhhr %r7, %r8, %r9
+0xb9 0xcb 0x90 0x78
+
+# CHECK: slhhlr %r0, %r0, %r0
+0xb9 0xdb 0x00 0x00
+
+# CHECK: slhhlr %r0, %r0, %r15
+0xb9 0xdb 0xf0 0x00
+
+# CHECK: slhhlr %r0, %r15, %r0
+0xb9 0xdb 0x00 0x0f
+
+# CHECK: slhhlr %r15, %r0, %r0
+0xb9 0xdb 0x00 0xf0
+
+# CHECK: slhhlr %r7, %r8, %r9
+0xb9 0xdb 0x90 0x78
+
# CHECK: sll %r0, 0
0x89 0x00 0x00 0x00
@@ -13959,6 +15348,36 @@
# CHECK: sp 0(1), 0(16,%r15)
0xfb 0x0f 0x00 0x00 0xf0 0x00
+# CHECK: spctr %r0, %r0
+0xb2 0xe1 0x00 0x00
+
+# CHECK: spctr %r0, %r15
+0xb2 0xe1 0x00 0x0f
+
+# CHECK: spctr %r15, %r0
+0xb2 0xe1 0x00 0xf0
+
+# CHECK: spctr %r7, %r8
+0xb2 0xe1 0x00 0x78
+
+# CHECK: spka 0
+0xb2 0x0a 0x00 0x00
+
+# CHECK: spka 0(%r1)
+0xb2 0x0a 0x10 0x00
+
+# CHECK: spka 0(%r15)
+0xb2 0x0a 0xf0 0x00
+
+# CHECK: spka 4095
+0xb2 0x0a 0x0f 0xff
+
+# CHECK: spka 4095(%r1)
+0xb2 0x0a 0x1f 0xff
+
+# CHECK: spka 4095(%r15)
+0xb2 0x0a 0xff 0xff
+
# CHECK: spm %r0
0x04 0x00
@@ -13968,6 +15387,42 @@
# CHECK: spm %r15
0x04 0xf0
+# CHECK: spt 0
+0xb2 0x08 0x00 0x00
+
+# CHECK: spt 0(%r1)
+0xb2 0x08 0x10 0x00
+
+# CHECK: spt 0(%r15)
+0xb2 0x08 0xf0 0x00
+
+# CHECK: spt 4095
+0xb2 0x08 0x0f 0xff
+
+# CHECK: spt 4095(%r1)
+0xb2 0x08 0x1f 0xff
+
+# CHECK: spt 4095(%r15)
+0xb2 0x08 0xff 0xff
+
+# CHECK: spx 0
+0xb2 0x10 0x00 0x00
+
+# CHECK: spx 0(%r1)
+0xb2 0x10 0x10 0x00
+
+# CHECK: spx 0(%r15)
+0xb2 0x10 0xf0 0x00
+
+# CHECK: spx 4095
+0xb2 0x10 0x0f 0xff
+
+# CHECK: spx 4095(%r1)
+0xb2 0x10 0x1f 0xff
+
+# CHECK: spx 4095(%r15)
+0xb2 0x10 0xff 0xff
+
# CHECK: sqd %f0, 0
0xed 0x00 0x00 0x00 0x00 0x35
@@ -14553,6 +16008,75 @@
# CHECK: srxt %f13, %f13, 0
0xed 0xd0 0x00 0x00 0xd0 0x49
+# CHECK: ssar %r0
+0xb2 0x25 0x00 0x00
+
+# CHECK: ssar %r1
+0xb2 0x25 0x00 0x10
+
+# CHECK: ssar %r15
+0xb2 0x25 0x00 0xf0
+
+# CHECK: ssair %r0
+0xb9 0x9f 0x00 0x00
+
+# CHECK: ssair %r1
+0xb9 0x9f 0x00 0x10
+
+# CHECK: ssair %r15
+0xb9 0x9f 0x00 0xf0
+
+# CHECK: ssch 0
+0xb2 0x33 0x00 0x00
+
+# CHECK: ssch 0(%r1)
+0xb2 0x33 0x10 0x00
+
+# CHECK: ssch 0(%r15)
+0xb2 0x33 0xf0 0x00
+
+# CHECK: ssch 4095
+0xb2 0x33 0x0f 0xff
+
+# CHECK: ssch 4095(%r1)
+0xb2 0x33 0x1f 0xff
+
+# CHECK: ssch 4095(%r15)
+0xb2 0x33 0xff 0xff
+
+# CHECK: sske %r0, %r0
+0xb2 0x2b 0x00 0x00
+
+# CHECK: sske %r0, %r15
+0xb2 0x2b 0x00 0x0f
+
+# CHECK: sske %r15, %r0
+0xb2 0x2b 0x00 0xf0
+
+# CHECK: sske %r0, %r0, 15
+0xb2 0x2b 0xf0 0x00
+
+# CHECK: sske %r4, %r6, 7
+0xb2 0x2b 0x70 0x46
+
+# CHECK: ssm 0
+0x80 0x00 0x00 0x00
+
+# CHECK: ssm 0(%r1)
+0x80 0x00 0x10 0x00
+
+# CHECK: ssm 0(%r15)
+0x80 0x00 0xf0 0x00
+
+# CHECK: ssm 4095
+0x80 0x00 0x0f 0xff
+
+# CHECK: ssm 4095(%r1)
+0x80 0x00 0x1f 0xff
+
+# CHECK: ssm 4095(%r15)
+0x80 0x00 0xff 0xff
+
# CHECK: st %r0, 0
0x50 0x00 0x00 0x00
@@ -14643,6 +16167,24 @@
# CHECK: stamy %a0, %a0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0x9b
+# CHECK: stap 0
+0xb2 0x12 0x00 0x00
+
+# CHECK: stap 0(%r1)
+0xb2 0x12 0x10 0x00
+
+# CHECK: stap 0(%r15)
+0xb2 0x12 0xf0 0x00
+
+# CHECK: stap 4095
+0xb2 0x12 0x0f 0xff
+
+# CHECK: stap 4095(%r1)
+0xb2 0x12 0x1f 0xff
+
+# CHECK: stap 4095(%r15)
+0xb2 0x12 0xff 0xff
+
# CHECK: stc %r0, 0
0x42 0x00 0x00 0x00
@@ -14712,6 +16254,24 @@
# CHECK: stck 4095(%r15)
0xb2 0x05 0xff 0xff
+# CHECK: stckc 0
+0xb2 0x07 0x00 0x00
+
+# CHECK: stckc 0(%r1)
+0xb2 0x07 0x10 0x00
+
+# CHECK: stckc 0(%r15)
+0xb2 0x07 0xf0 0x00
+
+# CHECK: stckc 4095
+0xb2 0x07 0x0f 0xff
+
+# CHECK: stckc 4095(%r1)
+0xb2 0x07 0x1f 0xff
+
+# CHECK: stckc 4095(%r15)
+0xb2 0x07 0xff 0xff
+
# CHECK: stcke 0
0xb2 0x78 0x00 0x00
@@ -14829,6 +16389,111 @@
# CHECK: stcmy %r15, 0, 0
0xeb 0xf0 0x00 0x00 0x00 0x2d
+# CHECK: stcps 0
+0xb2 0x3a 0x00 0x00
+
+# CHECK: stcps 0(%r1)
+0xb2 0x3a 0x10 0x00
+
+# CHECK: stcps 0(%r15)
+0xb2 0x3a 0xf0 0x00
+
+# CHECK: stcps 4095
+0xb2 0x3a 0x0f 0xff
+
+# CHECK: stcps 4095(%r1)
+0xb2 0x3a 0x1f 0xff
+
+# CHECK: stcps 4095(%r15)
+0xb2 0x3a 0xff 0xff
+
+# CHECK: stcrw 0
+0xb2 0x39 0x00 0x00
+
+# CHECK: stcrw 0(%r1)
+0xb2 0x39 0x10 0x00
+
+# CHECK: stcrw 0(%r15)
+0xb2 0x39 0xf0 0x00
+
+# CHECK: stcrw 4095
+0xb2 0x39 0x0f 0xff
+
+# CHECK: stcrw 4095(%r1)
+0xb2 0x39 0x1f 0xff
+
+# CHECK: stcrw 4095(%r15)
+0xb2 0x39 0xff 0xff
+
+# CHECK: stctg %c0, %c0, 0
+0xeb 0x00 0x00 0x00 0x00 0x25
+
+# CHECK: stctg %c0, %c15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x25
+
+# CHECK: stctg %c14, %c15, 0
+0xeb 0xef 0x00 0x00 0x00 0x25
+
+# CHECK: stctg %c15, %c15, 0
+0xeb 0xff 0x00 0x00 0x00 0x25
+
+# CHECK: stctg %c0, %c0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x25
+
+# CHECK: stctg %c0, %c0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x25
+
+# CHECK: stctg %c0, %c0, 0
+0xeb 0x00 0x00 0x00 0x00 0x25
+
+# CHECK: stctg %c0, %c0, 1
+0xeb 0x00 0x00 0x01 0x00 0x25
+
+# CHECK: stctg %c0, %c0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x25
+
+# CHECK: stctg %c0, %c0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x25
+
+# CHECK: stctg %c0, %c0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x25
+
+# CHECK: stctg %c0, %c0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x25
+
+# CHECK: stctg %c0, %c0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x25
+
+# CHECK: stctl %c0, %c0, 0
+0xb6 0x00 0x00 0x00
+
+# CHECK: stctl %c0, %c15, 0
+0xb6 0x0f 0x00 0x00
+
+# CHECK: stctl %c14, %c15, 0
+0xb6 0xef 0x00 0x00
+
+# CHECK: stctl %c15, %c15, 0
+0xb6 0xff 0x00 0x00
+
+# CHECK: stctl %c0, %c0, 4095
+0xb6 0x00 0x0f 0xff
+
+# CHECK: stctl %c0, %c0, 1
+0xb6 0x00 0x00 0x01
+
+# CHECK: stctl %c0, %c0, 0(%r1)
+0xb6 0x00 0x10 0x00
+
+# CHECK: stctl %c0, %c0, 0(%r15)
+0xb6 0x00 0xf0 0x00
+
+# CHECK: stctl %c0, %c0, 4095(%r1)
+0xb6 0x00 0x1f 0xff
+
+# CHECK: stctl %c0, %c0, 4095(%r15)
+0xb6 0x00 0xff 0xff
+
# CHECK: stcy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x72
@@ -14991,6 +16656,24 @@
# CHECK: stfh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0xcb
+# CHECK: stfl 0
+0xb2 0xb1 0x00 0x00
+
+# CHECK: stfl 0(%r1)
+0xb2 0xb1 0x10 0x00
+
+# CHECK: stfl 0(%r15)
+0xb2 0xb1 0xf0 0x00
+
+# CHECK: stfl 4095
+0xb2 0xb1 0x0f 0xff
+
+# CHECK: stfl 4095(%r1)
+0xb2 0xb1 0x1f 0xff
+
+# CHECK: stfl 4095(%r15)
+0xb2 0xb1 0xff 0xff
+
# CHECK: stfle 0
0xb2 0xb0 0x00 0x00
@@ -15138,6 +16821,24 @@
# CHECK: sthy %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x70
+# CHECK: stidp 0
+0xb2 0x02 0x00 0x00
+
+# CHECK: stidp 0(%r1)
+0xb2 0x02 0x10 0x00
+
+# CHECK: stidp 0(%r15)
+0xb2 0x02 0xf0 0x00
+
+# CHECK: stidp 4095
+0xb2 0x02 0x0f 0xff
+
+# CHECK: stidp 4095(%r1)
+0xb2 0x02 0x1f 0xff
+
+# CHECK: stidp 4095(%r15)
+0xb2 0x02 0xff 0xff
+
# CHECK: stm %r0, %r0, 0
0x90 0x00 0x00 0x00
@@ -15285,6 +16986,27 @@
# CHECK: stmy %r0, %r0, 524287(%r15)
0xeb 0x00 0xff 0xff 0x7f 0x90
+# CHECK: stnsm 0, 0
+0xac 0x00 0x00 0x00
+
+# CHECK: stnsm 4095, 0
+0xac 0x00 0x0f 0xff
+
+# CHECK: stnsm 0, 255
+0xac 0xff 0x00 0x00
+
+# CHECK: stnsm 0(%r1), 42
+0xac 0x2a 0x10 0x00
+
+# CHECK: stnsm 0(%r15), 42
+0xac 0x2a 0xf0 0x00
+
+# CHECK: stnsm 4095(%r1), 42
+0xac 0x2a 0x1f 0xff
+
+# CHECK: stnsm 4095(%r15), 42
+0xac 0x2a 0xff 0xff
+
# CHECK: stoc %r1, 2(%r3), 0
0xeb 0x10 0x30 0x02 0x00 0xf3
@@ -15381,6 +17103,27 @@
# CHECK: stocg %r1, 2(%r3), 15
0xeb 0x1f 0x30 0x02 0x00 0xe3
+# CHECK: stosm 0, 0
+0xad 0x00 0x00 0x00
+
+# CHECK: stosm 4095, 0
+0xad 0x00 0x0f 0xff
+
+# CHECK: stosm 0, 255
+0xad 0xff 0x00 0x00
+
+# CHECK: stosm 0(%r1), 42
+0xad 0x2a 0x10 0x00
+
+# CHECK: stosm 0(%r15), 42
+0xad 0x2a 0xf0 0x00
+
+# CHECK: stosm 4095(%r1), 42
+0xad 0x2a 0x1f 0xff
+
+# CHECK: stosm 4095(%r15), 42
+0xad 0x2a 0xff 0xff
+
# CHECK: stpq %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x8e
@@ -15411,6 +17154,42 @@
# CHECK: stpq %r14, 0
0xe3 0xe0 0x00 0x00 0x00 0x8e
+# CHECK: stpt 0
+0xb2 0x09 0x00 0x00
+
+# CHECK: stpt 0(%r1)
+0xb2 0x09 0x10 0x00
+
+# CHECK: stpt 0(%r15)
+0xb2 0x09 0xf0 0x00
+
+# CHECK: stpt 4095
+0xb2 0x09 0x0f 0xff
+
+# CHECK: stpt 4095(%r1)
+0xb2 0x09 0x1f 0xff
+
+# CHECK: stpt 4095(%r15)
+0xb2 0x09 0xff 0xff
+
+# CHECK: stpx 0
+0xb2 0x11 0x00 0x00
+
+# CHECK: stpx 0(%r1)
+0xb2 0x11 0x10 0x00
+
+# CHECK: stpx 0(%r15)
+0xb2 0x11 0xf0 0x00
+
+# CHECK: stpx 4095
+0xb2 0x11 0x0f 0xff
+
+# CHECK: stpx 4095(%r1)
+0xb2 0x11 0x1f 0xff
+
+# CHECK: stpx 4095(%r15)
+0xb2 0x11 0xff 0xff
+
# CHECK: strag 0, 0
0xe5 0x02 0x00 0x00 0x00 0x00
@@ -15519,6 +17298,66 @@
# CHECK: strvh %r15, 0
0xe3 0xf0 0x00 0x00 0x00 0x3f
+# CHECK: stsch 0
+0xb2 0x34 0x00 0x00
+
+# CHECK: stsch 0(%r1)
+0xb2 0x34 0x10 0x00
+
+# CHECK: stsch 0(%r15)
+0xb2 0x34 0xf0 0x00
+
+# CHECK: stsch 4095
+0xb2 0x34 0x0f 0xff
+
+# CHECK: stsch 4095(%r1)
+0xb2 0x34 0x1f 0xff
+
+# CHECK: stsch 4095(%r15)
+0xb2 0x34 0xff 0xff
+
+# CHECK: stsi 0
+0xb2 0x7d 0x00 0x00
+
+# CHECK: stsi 0(%r1)
+0xb2 0x7d 0x10 0x00
+
+# CHECK: stsi 0(%r15)
+0xb2 0x7d 0xf0 0x00
+
+# CHECK: stsi 4095
+0xb2 0x7d 0x0f 0xff
+
+# CHECK: stsi 4095(%r1)
+0xb2 0x7d 0x1f 0xff
+
+# CHECK: stsi 4095(%r15)
+0xb2 0x7d 0xff 0xff
+
+# CHECK: stura %r0, %r0
+0xb2 0x46 0x00 0x00
+
+# CHECK: stura %r0, %r15
+0xb2 0x46 0x00 0x0f
+
+# CHECK: stura %r15, %r0
+0xb2 0x46 0x00 0xf0
+
+# CHECK: stura %r7, %r8
+0xb2 0x46 0x00 0x78
+
+# CHECK: sturg %r0, %r0
+0xb9 0x25 0x00 0x00
+
+# CHECK: sturg %r0, %r15
+0xb9 0x25 0x00 0x0f
+
+# CHECK: sturg %r15, %r0
+0xb9 0x25 0x00 0xf0
+
+# CHECK: sturg %r7, %r8
+0xb9 0x25 0x00 0x78
+
# CHECK: sty %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x50
@@ -15735,6 +17574,33 @@
# CHECK: tam
0x01 0x0b
+# CHECK: tar %a0, %r0
+0xb2 0x4c 0x00 0x00
+
+# CHECK: tar %a0, %r15
+0xb2 0x4c 0x00 0x0f
+
+# CHECK: tar %a15, %r0
+0xb2 0x4c 0x00 0xf0
+
+# CHECK: tar %a7, %r8
+0xb2 0x4c 0x00 0x78
+
+# CHECK: tb %r0, %r0
+0xb2 0x2c 0x00 0x00
+
+# CHECK: tb %r0, %r15
+0xb2 0x2c 0x00 0x0f
+
+# CHECK: tb %r15, %r0
+0xb2 0x2c 0x00 0xf0
+
+# CHECK: tb %r7, %r8
+0xb2 0x2c 0x00 0x78
+
+# CHECK: tb %r15, %r15
+0xb2 0x2c 0x00 0xff
+
# CHECK: tbdr %f0, 0, %f0
0xb3 0x51 0x00 0x00
@@ -16167,6 +18033,54 @@
# CHECK: tp 0(16,%r15)
0xeb 0xf0 0xf0 0x00 0x00 0xc0
+# CHECK: tpi 0
+0xb2 0x36 0x00 0x00
+
+# CHECK: tpi 0(%r1)
+0xb2 0x36 0x10 0x00
+
+# CHECK: tpi 0(%r15)
+0xb2 0x36 0xf0 0x00
+
+# CHECK: tpi 4095
+0xb2 0x36 0x0f 0xff
+
+# CHECK: tpi 4095(%r1)
+0xb2 0x36 0x1f 0xff
+
+# CHECK: tpi 4095(%r15)
+0xb2 0x36 0xff 0xff
+
+# CHECK: tprot 0, 0
+0xe5 0x01 0x00 0x00 0x00 0x00
+
+# CHECK: tprot 0(%r1), 0(%r2)
+0xe5 0x01 0x10 0x00 0x20 0x00
+
+# CHECK: tprot 160(%r1), 320(%r15)
+0xe5 0x01 0x10 0xa0 0xf1 0x40
+
+# CHECK: tprot 0(%r1), 4095
+0xe5 0x01 0x10 0x00 0x0f 0xff
+
+# CHECK: tprot 0(%r1), 4095(%r2)
+0xe5 0x01 0x10 0x00 0x2f 0xff
+
+# CHECK: tprot 0(%r1), 4095(%r15)
+0xe5 0x01 0x10 0x00 0xff 0xff
+
+# CHECK: tprot 0(%r1), 0
+0xe5 0x01 0x10 0x00 0x00 0x00
+
+# CHECK: tprot 0(%r15), 0
+0xe5 0x01 0xf0 0x00 0x00 0x00
+
+# CHECK: tprot 4095(%r1), 0
+0xe5 0x01 0x1f 0xff 0x00 0x00
+
+# CHECK: tprot 4095(%r15), 0
+0xe5 0x01 0xff 0xff 0x00 0x00
+
# CHECK: tr 0(1), 0
0xdc 0x00 0x00 0x00 0x00 0x00
@@ -16203,6 +18117,96 @@
# CHECK: tr 0(256,%r15), 0
0xdc 0xff 0xf0 0x00 0x00 0x00
+# CHECK: trace %r0, %r0, 0
+0x99 0x00 0x00 0x00
+
+# CHECK: trace %r0, %r15, 0
+0x99 0x0f 0x00 0x00
+
+# CHECK: trace %r14, %r15, 0
+0x99 0xef 0x00 0x00
+
+# CHECK: trace %r15, %r15, 0
+0x99 0xff 0x00 0x00
+
+# CHECK: trace %r0, %r0, 4095
+0x99 0x00 0x0f 0xff
+
+# CHECK: trace %r0, %r0, 1
+0x99 0x00 0x00 0x01
+
+# CHECK: trace %r0, %r0, 0(%r1)
+0x99 0x00 0x10 0x00
+
+# CHECK: trace %r0, %r0, 0(%r15)
+0x99 0x00 0xf0 0x00
+
+# CHECK: trace %r0, %r0, 4095(%r1)
+0x99 0x00 0x1f 0xff
+
+# CHECK: trace %r0, %r0, 4095(%r15)
+0x99 0x00 0xff 0xff
+
+# CHECK: tracg %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x0f
+
+# CHECK: tracg %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0x0f
+
+# CHECK: tracg %r14, %r15, 0
+0xeb 0xef 0x00 0x00 0x00 0x0f
+
+# CHECK: tracg %r15, %r15, 0
+0xeb 0xff 0x00 0x00 0x00 0x0f
+
+# CHECK: tracg %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0x0f
+
+# CHECK: tracg %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0x0f
+
+# CHECK: tracg %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0x0f
+
+# CHECK: tracg %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0x0f
+
+# CHECK: tracg %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0x0f
+
+# CHECK: tracg %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0x0f
+
+# CHECK: tracg %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0x0f
+
+# CHECK: tracg %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0x0f
+
+# CHECK: tracg %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0x0f
+
+# CHECK: trap2
+0x01 0xff
+
+# CHECK: trap4 0
+0xb2 0xff 0x00 0x00
+
+# CHECK: trap4 0(%r1)
+0xb2 0xff 0x10 0x00
+
+# CHECK: trap4 0(%r15)
+0xb2 0xff 0xf0 0x00
+
+# CHECK: trap4 4095
+0xb2 0xff 0x0f 0xff
+
+# CHECK: trap4 4095(%r1)
+0xb2 0xff 0x1f 0xff
+
+# CHECK: trap4 4095(%r15)
+0xb2 0xff 0xff 0xff
+
# CHECK: tre %r0, %r0
0xb2 0xa5 0x00 0x00
@@ -16413,6 +18417,24 @@
# CHECK: ts 4095(%r15)
0x93 0x00 0xff 0xff
+# CHECK: tsch 0
+0xb2 0x35 0x00 0x00
+
+# CHECK: tsch 0(%r1)
+0xb2 0x35 0x10 0x00
+
+# CHECK: tsch 0(%r15)
+0xb2 0x35 0xf0 0x00
+
+# CHECK: tsch 4095
+0xb2 0x35 0x0f 0xff
+
+# CHECK: tsch 4095(%r1)
+0xb2 0x35 0x1f 0xff
+
+# CHECK: tsch 4095(%r15)
+0xb2 0x35 0xff 0xff
+
# CHECK: unpk 0(1), 0(1)
0xf3 0x00 0x00 0x00 0x00 0x00
@@ -16722,6 +18744,9 @@
# CHECK: xrk %r2, %r3, %r4
0xb9 0xf7 0x40 0x23
+# CHECK: xsch
+0xb2 0x76 0x00 0x00
+
# CHECK: xy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x57
diff --git a/test/MC/Mips/macro-dla-bad.s b/test/MC/Mips/macro-dla-bad.s
new file mode 100644
index 0000000000000..cd377f4557ca6
--- /dev/null
+++ b/test/MC/Mips/macro-dla-bad.s
@@ -0,0 +1,21 @@
+# RUN: not llvm-mc %s -arch=mips64 -mcpu=mips3 -target-abi n64 2>&1 | \
+# RUN: FileCheck %s
+
+ .text
+ .option pic2
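+ # Each offset below is one past the signed 16-bit immediate range
+ # (-0x8000..0x7fff), which is presumably why the dla macro rejects
+ # it as a large offset rather than folding it into a single daddiu.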
+ dla $5, symbol+0x8000
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $5, symbol-0x8001
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $5, symbol+0x8000($6)
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $5, symbol-0x8001($6)
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $25, symbol+0x8000
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $25, symbol-0x8001
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $25, symbol+0x8000($6)
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
+ dla $25, symbol-0x8001($6)
+ # CHECK: :[[@LINE-1]]:3: error: macro instruction uses large offset, which is not currently supported
diff --git a/test/MC/Mips/macro-dla-pic.s b/test/MC/Mips/macro-dla-pic.s
new file mode 100644
index 0000000000000..ed5aa202618ef
--- /dev/null
+++ b/test/MC/Mips/macro-dla-pic.s
@@ -0,0 +1,50 @@
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips3 | \
+# RUN: FileCheck %s
+
+.option pic2
+dla $5, symbol # CHECK: ld $5, %got_disp(symbol)($gp) # encoding: [0xdf,0x85,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+dla $5, symbol($6) # CHECK: ld $5, %got_disp(symbol)($gp) # encoding: [0xdf,0x85,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddu $5, $5, $6 # encoding: [0x00,0xa6,0x28,0x2d]
+dla $6, symbol($6) # CHECK: ld $1, %got_disp(symbol)($gp) # encoding: [0xdf,0x81,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddu $6, $1, $6 # encoding: [0x00,0x26,0x30,0x2d]
+dla $5, symbol+8 # CHECK: ld $5, %got_disp(symbol)($gp) # encoding: [0xdf,0x85,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddiu $5, $5, 8 # encoding: [0x64,0xa5,0x00,0x08]
+dla $5, symbol+8($6) # CHECK: ld $5, %got_disp(symbol)($gp) # encoding: [0xdf,0x85,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddiu $5, $5, 8 # encoding: [0x64,0xa5,0x00,0x08]
+ # CHECK: daddu $5, $5, $6 # encoding: [0x00,0xa6,0x28,0x2d]
+dla $6, symbol+8($6) # CHECK: ld $1, %got_disp(symbol)($gp) # encoding: [0xdf,0x81,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddiu $1, $1, 8 # encoding: [0x64,0x21,0x00,0x08]
+ # CHECK: daddu $6, $1, $6 # encoding: [0x00,0x26,0x30,0x2d]
+dla $5, 1f # CHECK: ld $5, %got_disp(.Ltmp0)($gp) # encoding: [0xdf,0x85,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(.Ltmp0), kind: fixup_Mips_GOT_DISP
+1:
+
+# PIC expansions involving $25 are special.
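+# ($25 is $t9, which MIPS PIC calling conventions expect to hold the
+# callee address; loading a bare symbol into $25 therefore uses the
+# %call16 call relocation, while the other forms below fall back to
+# %got_disp, as the CHECK lines show.)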
+dla $25, symbol # CHECK: ld $25, %call16(symbol)($gp) # encoding: [0xdf,0x99,A,A]
+ # CHECK: # fixup A - offset: 0, value: %call16(symbol), kind: fixup_Mips_CALL16
+dla $25, symbol($6) # CHECK: ld $25, %got_disp(symbol)($gp) # encoding: [0xdf,0x99,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddu $25, $25, $6 # encoding: [0x03,0x26,0xc8,0x2d]
+dla $25, symbol($25) # CHECK: ld $1, %got_disp(symbol)($gp) # encoding: [0xdf,0x81,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddu $25, $1, $25 # encoding: [0x00,0x39,0xc8,0x2d]
+dla $25, symbol+8 # CHECK: ld $25, %got_disp(symbol)($gp) # encoding: [0xdf,0x99,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddiu $25, $25, 8 # encoding: [0x67,0x39,0x00,0x08]
+dla $25, symbol+8($6) # CHECK: ld $25, %got_disp(symbol)($gp) # encoding: [0xdf,0x99,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddiu $25, $25, 8 # encoding: [0x67,0x39,0x00,0x08]
+ # CHECK: daddu $25, $25, $6 # encoding: [0x03,0x26,0xc8,0x2d]
+dla $25, symbol+8($25) # CHECK: ld $1, %got_disp(symbol)($gp) # encoding: [0xdf,0x81,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(symbol), kind: fixup_Mips_GOT_DISP
+ # CHECK: daddiu $1, $1, 8 # encoding: [0x64,0x21,0x00,0x08]
+ # CHECK: daddu $25, $1, $25 # encoding: [0x00,0x39,0xc8,0x2d]
+dla $25, 1f # CHECK: ld $25, %got_disp(.Ltmp1)($gp) # encoding: [0xdf,0x99,A,A]
+ # CHECK: # fixup A - offset: 0, value: %got_disp(.Ltmp1), kind: fixup_Mips_GOT_DISP
+1:
diff --git a/test/MC/Mips/micromips64r6/valid.s b/test/MC/Mips/micromips64r6/valid.s
index d757384344d41..3ead62fc61692 100644
--- a/test/MC/Mips/micromips64r6/valid.s
+++ b/test/MC/Mips/micromips64r6/valid.s
@@ -24,6 +24,10 @@ a:
dextm $9, $6, 3, 39 # CHECK: dextm $9, $6, 3, 39 # encoding: [0x59,0x26,0x30,0xe4]
dextu $9, $6, 35, 7 # CHECK: dextu $9, $6, 35, 7 # encoding: [0x59,0x26,0x30,0xd4]
dalign $4, $2, $3, 5 # CHECK: dalign $4, $2, $3, 5 # encoding: [0x58,0x43,0x25,0x1c]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x58,0x85,0x20,0x10]
+ dsll $4, $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x58,0x85,0x20,0x10]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x58,0x85,0x20,0x50]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x58,0x85,0x20,0x50]
ldpc $2, 16 # CHECK: ldpc $2, 16 # encoding: [0x78,0x58,0x00,0x02]
lw $3, 32($gp) # CHECK: lw $3, 32($gp) # encoding: [0x65,0x88]
lw $3, 24($sp) # CHECK: lw $3, 24($sp) # encoding: [0x48,0x66]
diff --git a/test/MC/Mips/mips3/valid.s b/test/MC/Mips/mips3/valid.s
index 1f0e22dbe3f9e..694952b3a2ba4 100644
--- a/test/MC/Mips/mips3/valid.s
+++ b/test/MC/Mips/mips3/valid.s
@@ -83,6 +83,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -95,6 +97,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips4/valid.s b/test/MC/Mips/mips4/valid.s
index 69cea599e7487..68f269c1c8dd2 100644
--- a/test/MC/Mips/mips4/valid.s
+++ b/test/MC/Mips/mips4/valid.s
@@ -115,6 +115,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -127,6 +129,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips5/valid.s b/test/MC/Mips/mips5/valid.s
index 85fdfb507aad2..ad58a15ca8cef 100644
--- a/test/MC/Mips/mips5/valid.s
+++ b/test/MC/Mips/mips5/valid.s
@@ -115,6 +115,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -127,6 +129,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips64/valid.s b/test/MC/Mips/mips64/valid.s
index 716488df7b500..d40b784a789dc 100644
--- a/test/MC/Mips/mips64/valid.s
+++ b/test/MC/Mips/mips64/valid.s
@@ -122,6 +122,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -134,6 +136,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips64r2/valid.s b/test/MC/Mips/mips64r2/valid.s
index 656b76c758eb9..0826391fcaaff 100644
--- a/test/MC/Mips/mips64r2/valid.s
+++ b/test/MC/Mips/mips64r2/valid.s
@@ -131,6 +131,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -143,6 +145,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips64r3/valid.s b/test/MC/Mips/mips64r3/valid.s
index 52d44da8f56f0..4bdef443d70b9 100644
--- a/test/MC/Mips/mips64r3/valid.s
+++ b/test/MC/Mips/mips64r3/valid.s
@@ -131,6 +131,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -143,6 +145,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips64r5/valid.s b/test/MC/Mips/mips64r5/valid.s
index f400436b696ff..4215f67daa0c8 100644
--- a/test/MC/Mips/mips64r5/valid.s
+++ b/test/MC/Mips/mips64r5/valid.s
@@ -131,6 +131,8 @@ a:
dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
@@ -143,6 +145,8 @@ a:
dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
diff --git a/test/MC/Mips/mips64r6/valid.s b/test/MC/Mips/mips64r6/valid.s
index a86b3c915e855..486738baa84d7 100644
--- a/test/MC/Mips/mips64r6/valid.s
+++ b/test/MC/Mips/mips64r6/valid.s
@@ -125,9 +125,13 @@ a:
dmuhu $2,$3,$4 # CHECK: dmuhu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xdd]
dmul $2,$3,$4 # CHECK: dmul $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9c]
dmulu $2,$3,$4 # CHECK: dmulu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9d]
- dneg $2 # CHECK: dneg $2, $2 # encoding: [0x00,0x02,0x10,0x2e]
- dneg $2,$3 # CHECK: dneg $2, $3 # encoding: [0x00,0x03,0x10,0x2e]
- dnegu $2,$3 # CHECK: dnegu $2, $3 # encoding: [0x00,0x03,0x10,0x2f]
+ dneg $2 # CHECK: dneg $2, $2 # encoding: [0x00,0x02,0x10,0x2e]
+ dneg $2,$3 # CHECK: dneg $2, $3 # encoding: [0x00,0x03,0x10,0x2e]
+ dnegu $2,$3 # CHECK: dnegu $2, $3 # encoding: [0x00,0x03,0x10,0x2f]
+ dsll $4, $5 # CHECK: dsllv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x14]
+ dsll $4, $5, $5 # CHECK: dsllv $4, $5, $5 # encoding: [0x00,0xa5,0x20,0x14]
+ dsrl $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
+ dsrl $4, $4, $5 # CHECK: dsrlv $4, $4, $5 # encoding: [0x00,0xa4,0x20,0x16]
dsubu $14,-4586 # CHECK: daddiu $14, $14, 4586 # encoding: [0x65,0xce,0x11,0xea]
dsubu $15,$11,5025 # CHECK: daddiu $15, $11, -5025 # encoding: [0x65,0x6f,0xec,0x5f]
dvp $4 # CHECK: dvp $4 # encoding: [0x41,0x64,0x00,0x24]
diff --git a/test/MC/SystemZ/insn-bad-z196.s b/test/MC/SystemZ/insn-bad-z196.s
index 78d50bca9746f..33059529f74b1 100644
--- a/test/MC/SystemZ/insn-bad-z196.s
+++ b/test/MC/SystemZ/insn-bad-z196.s
@@ -43,6 +43,22 @@
aih %r0, (1 << 31)
#CHECK: error: invalid operand
+#CHECK: alsih %r0, (-1 << 31) - 1
+#CHECK: error: invalid operand
+#CHECK: alsih %r0, (1 << 31)
+
+ alsih %r0, (-1 << 31) - 1
+ alsih %r0, (1 << 31)
+
+#CHECK: error: invalid operand
+#CHECK: alsihn %r0, (-1 << 31) - 1
+#CHECK: error: invalid operand
+#CHECK: alsihn %r0, (1 << 31)
+
+ alsihn %r0, (-1 << 31) - 1
+ alsihn %r0, (1 << 31)
+
+#CHECK: error: invalid operand
#CHECK: axtra %f0, %f0, %f0, -1
#CHECK: error: invalid operand
#CHECK: axtra %f0, %f0, %f0, 16
@@ -592,6 +608,11 @@
clih %r0, -1
clih %r0, (1 << 32)
+#CHECK: error: instruction requires: enhanced-dat-2
+#CHECK: crdte %r0, %r0, %r0, 0
+
+ crdte %r0, %r0, %r0, 0
+
#CHECK: error: invalid operand
#CHECK: cxfbra %f0, 0, %r0, -1
#CHECK: error: invalid operand
diff --git a/test/MC/SystemZ/insn-bad-zEC12.s b/test/MC/SystemZ/insn-bad-zEC12.s
index 80197a3c1ef15..552180a7c6107 100644
--- a/test/MC/SystemZ/insn-bad-zEC12.s
+++ b/test/MC/SystemZ/insn-bad-zEC12.s
@@ -162,6 +162,20 @@
cpxt %f0, 0(1), 0
+#CHECK: error: invalid register pair
+#CHECK: crdte %r1, %r0, %r0, 0
+#CHECK: error: invalid register pair
+#CHECK: crdte %r0, %r0, %r1, 0
+#CHECK: error: invalid operand
+#CHECK: crdte %r0, %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: crdte %r0, %r0, %r0, 16
+
+ crdte %r1, %r0, %r0, 0
+ crdte %r0, %r0, %r1, 0
+ crdte %r0, %r0, %r0, -1
+ crdte %r0, %r0, %r0, 16
+
#CHECK: error: instruction requires: dfp-packed-conversion
#CHECK: cxpt %f0, 0(1), 0
diff --git a/test/MC/SystemZ/insn-bad.s b/test/MC/SystemZ/insn-bad.s
index 259ad05e5f4af..57c69f60361bf 100644
--- a/test/MC/SystemZ/insn-bad.s
+++ b/test/MC/SystemZ/insn-bad.s
@@ -127,6 +127,16 @@
ah %r0, -1
ah %r0, 4096
+#CHECK: error: instruction requires: high-word
+#CHECK: ahhhr %r0, %r0, %r0
+
+ ahhhr %r0, %r0, %r0
+
+#CHECK: error: instruction requires: high-word
+#CHECK: ahhlr %r0, %r0, %r0
+
+ ahhlr %r0, %r0, %r0
+
#CHECK: error: invalid operand
#CHECK: ahi %r0, -32769
#CHECK: error: invalid operand
@@ -222,6 +232,16 @@
algrk %r2,%r3,%r4
+#CHECK: error: instruction requires: high-word
+#CHECK: alhhhr %r0, %r0, %r0
+
+ alhhhr %r0, %r0, %r0
+
+#CHECK: error: instruction requires: high-word
+#CHECK: alhhlr %r0, %r0, %r0
+
+ alhhlr %r0, %r0, %r0
+
#CHECK: error: instruction requires: distinct-ops
#CHECK: alhsik %r1, %r2, 3
@@ -266,6 +286,16 @@
alsi 0, -129
alsi 0, 128
+#CHECK: error: instruction requires: high-word
+#CHECK: alsih %r0, 0
+
+ alsih %r0, 0
+
+#CHECK: error: instruction requires: high-word
+#CHECK: alsihn %r0, 0
+
+ alsihn %r0, 0
+
#CHECK: error: invalid operand
#CHECK: aly %r0, -524289
#CHECK: error: invalid operand
@@ -1176,6 +1206,11 @@
chf %r0, 0
+#CHECK: error: instruction requires: high-word
+#CHECK: chhr %r0, %r0
+
+ chhr %r0, %r0
+
#CHECK: error: invalid operand
#CHECK: chhsi -1, 0
#CHECK: error: invalid operand
@@ -1204,6 +1239,11 @@
chi %r0, 32768
chi %r0, foo
+#CHECK: error: instruction requires: high-word
+#CHECK: chlr %r0, %r0
+
+ chlr %r0, %r0
+
#CHECK: error: offset out of range
#CHECK: chrl %r0, -0x1000000002
#CHECK: error: offset out of range
@@ -1628,6 +1668,11 @@
clhf %r0, 0
+#CHECK: error: instruction requires: high-word
+#CHECK: clhhr %r0, %r0
+
+ clhhr %r0, %r0
+
#CHECK: error: invalid operand
#CHECK: clhhsi -1, 0
#CHECK: error: invalid operand
@@ -1645,6 +1690,11 @@
clhhsi 0, -1
clhhsi 0, 65536
+#CHECK: error: instruction requires: high-word
+#CHECK: clhlr %r0, %r0
+
+ clhlr %r0, %r0
+
#CHECK: error: offset out of range
#CHECK: clhrl %r0, -0x1000000002
#CHECK: error: offset out of range
@@ -1957,6 +2007,16 @@
csg %r0, %r0, 524288
csg %r0, %r0, 0(%r1,%r2)
+#CHECK: error: invalid register pair
+#CHECK: csp %r1, %r0
+
+ csp %r1, %r0
+
+#CHECK: error: invalid register pair
+#CHECK: cspg %r1, %r0
+
+ cspg %r1, %r0
+
#CHECK: error: invalid use of indexed addressing
#CHECK: csst 160(%r1,%r15), 160(%r15), %r2
#CHECK: error: invalid operand
@@ -2325,6 +2385,17 @@
deb %f0, 4096
#CHECK: error: invalid operand
+#CHECK: diag %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: diag %r0, %r0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: diag %r0, %r0, 0(%r1,%r2)
+
+ diag %r0, %r0, -1
+ diag %r0, %r0, 4096
+ diag %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: didbr %f0, %f0, %f0, -1
#CHECK: error: invalid operand
#CHECK: didbr %f0, %f0, %f0, 16
@@ -2619,6 +2690,11 @@
eextr %f2, %f0
#CHECK: error: invalid register pair
+#CHECK: esta %r1, %r0
+
+ esta %r1, %r0
+
+#CHECK: error: invalid register pair
#CHECK: esxtr %f0, %f2
#CHECK: error: invalid register pair
#CHECK: esxtr %f2, %f0
@@ -2784,6 +2860,14 @@
icy %r0, -524289
icy %r0, 524288
+#CHECK: error: invalid operand
+#CHECK: idte %r0, %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: idte %r0, %r0, %r0, 16
+
+ idte %r0, %r0, %r0, -1
+ idte %r0, %r0, %r0, 16
+
#CHECK: error: invalid register pair
#CHECK: iextr %f0, %f0, %f2
#CHECK: error: invalid register pair
@@ -2844,6 +2928,14 @@
iill %r0, 0x10000
#CHECK: error: invalid operand
+#CHECK: ipte %r0, %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: ipte %r0, %r0, %r0, 16
+
+ ipte %r0, %r0, %r0, -1
+ ipte %r0, %r0, %r0, 16
+
+#CHECK: error: invalid operand
#CHECK: kdb %f0, -1
#CHECK: error: invalid operand
#CHECK: kdb %f0, 4096
@@ -3018,6 +3110,23 @@
larl %r0, 1
larl %r0, 0x100000000
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lasp 160(%r1,%r15),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: lasp -1(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: lasp 4096(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: lasp 0(%r1),-1(%r15)
+#CHECK: error: invalid operand
+#CHECK: lasp 0(%r1),4096(%r15)
+
+ lasp 160(%r1,%r15),160(%r15)
+ lasp -1(%r1),160(%r15)
+ lasp 4096(%r1),160(%r15)
+ lasp 0(%r1),-1(%r15)
+ lasp 0(%r1),4096(%r15)
+
#CHECK: error: instruction requires: interlocked-access1
#CHECK: lax %r1, %r2, 100(%r3)
lax %r1, %r2, 100(%r3)
@@ -3047,6 +3156,39 @@
lbh %r0, 0
+#CHECK: error: invalid operand
+#CHECK: lcctl -1
+#CHECK: error: invalid operand
+#CHECK: lcctl 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lcctl 0(%r1,%r2)
+
+ lcctl -1
+ lcctl 4096
+ lcctl 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lctl %c0, %c0, -1
+#CHECK: error: invalid operand
+#CHECK: lctl %c0, %c0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lctl %c0, %c0, 0(%r1,%r2)
+
+ lctl %c0, %c0, -1
+ lctl %c0, %c0, 4096
+ lctl %c0, %c0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lctlg %c0, %c0, -524289
+#CHECK: error: invalid operand
+#CHECK: lctlg %c0, %c0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lctlg %c0, %c0, 0(%r1,%r2)
+
+ lctlg %c0, %c0, -524289
+ lctlg %c0, %c0, 524288
+ lctlg %c0, %c0, 0(%r1,%r2)
+
#CHECK: error: invalid register pair
#CHECK: lcxbr %f0, %f2
#CHECK: error: invalid register pair
@@ -3574,6 +3716,17 @@
lnxr %f0, %f2
lnxr %f2, %f0
+#CHECK: error: invalid operand
+#CHECK: lpctl -1
+#CHECK: error: invalid operand
+#CHECK: lpctl 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lpctl 0(%r1,%r2)
+
+ lpctl -1
+ lpctl 4096
+ lpctl 0(%r1,%r2)
+
#CHECK: error: instruction requires: interlocked-access1
#CHECK: lpd %r0, 0, 0
lpd %r0, 0, 0
@@ -3582,6 +3735,17 @@
#CHECK: lpdg %r0, 0, 0
lpdg %r0, 0, 0
+#CHECK: error: invalid operand
+#CHECK: lpp -1
+#CHECK: error: invalid operand
+#CHECK: lpp 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lpp 0(%r1,%r2)
+
+ lpp -1
+ lpp 4096
+ lpp 0(%r1,%r2)
+
#CHECK: error: invalid register pair
#CHECK: lpq %r1, 0
#CHECK: error: invalid operand
@@ -3593,6 +3757,36 @@
lpq %r0, -524289
lpq %r0, 524288
+#CHECK: error: invalid operand
+#CHECK: lptea %r0, %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: lptea %r0, %r0, %r0, 16
+
+ lptea %r0, %r0, %r0, -1
+ lptea %r0, %r0, %r0, 16
+
+#CHECK: error: invalid operand
+#CHECK: lpsw -1
+#CHECK: error: invalid operand
+#CHECK: lpsw 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lpsw 0(%r1,%r2)
+
+ lpsw -1
+ lpsw 4096
+ lpsw 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lpswe -1
+#CHECK: error: invalid operand
+#CHECK: lpswe 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lpswe 0(%r1,%r2)
+
+ lpswe -1
+ lpswe 4096
+ lpswe 0(%r1,%r2)
+
#CHECK: error: invalid register pair
#CHECK: lpxbr %f0, %f2
#CHECK: error: invalid register pair
@@ -3609,6 +3803,30 @@
lpxr %f0, %f2
lpxr %f2, %f0
+#CHECK: error: invalid operand
+#CHECK: lra %r0, -1
+#CHECK: error: invalid operand
+#CHECK: lra %r0, 4096
+
+ lra %r0, -1
+ lra %r0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: lrag %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lrag %r0, 524288
+
+ lrag %r0, -524289
+ lrag %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: lray %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lray %r0, 524288
+
+ lray %r0, -524289
+ lray %r0, 524288
+
#CHECK: error: invalid register pair
#CHECK: lrdr %f0, %f2
@@ -3645,6 +3863,17 @@
lrvg %r0, 524288
#CHECK: error: invalid operand
+#CHECK: lsctl -1
+#CHECK: error: invalid operand
+#CHECK: lsctl 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lsctl 0(%r1,%r2)
+
+ lsctl -1
+ lsctl 4096
+ lsctl 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: lt %r0, -524289
#CHECK: error: invalid operand
#CHECK: lt %r0, 524288
@@ -4089,6 +4318,17 @@
ms %r0, 4096
#CHECK: error: invalid operand
+#CHECK: msch -1
+#CHECK: error: invalid operand
+#CHECK: msch 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: msch 0(%r1,%r2)
+
+ msch -1
+ msch 4096
+ msch 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: msd %f0, %f0, -1
#CHECK: error: invalid operand
#CHECK: msd %f0, %f0, 4096
@@ -4152,6 +4392,11 @@
msgfi %r0, (-1 << 31) - 1
msgfi %r0, (1 << 31)
+#CHECK: error: invalid register pair
+#CHECK: msta %r1
+
+ msta %r1
+
#CHECK: error: invalid operand
#CHECK: msy %r0, -524289
#CHECK: error: invalid operand
@@ -4204,6 +4449,23 @@
mvc 0(1,%r2), 0(%r1,%r2)
mvc 0(-), 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcdk 160(%r1,%r15),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcdk -1(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcdk 4096(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcdk 0(%r1),-1(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcdk 0(%r1),4096(%r15)
+
+ mvcdk 160(%r1,%r15),160(%r15)
+ mvcdk -1(%r1),160(%r15)
+ mvcdk 4096(%r1),160(%r15)
+ mvcdk 0(%r1),-1(%r15)
+ mvcdk 0(%r1),4096(%r15)
+
#CHECK: error: missing length in address
#CHECK: mvcin 0, 0
#CHECK: error: missing length in address
@@ -4313,6 +4575,98 @@
mvclu %r0, %r0, -524289
mvclu %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcos 160(%r1,%r15), 160(%r15), %r2
+#CHECK: error: invalid operand
+#CHECK: mvcos -1(%r1), 160(%r15), %r2
+#CHECK: error: invalid operand
+#CHECK: mvcos 4096(%r1), 160(%r15), %r2
+#CHECK: error: invalid operand
+#CHECK: mvcos 0(%r1), -1(%r15), %r2
+#CHECK: error: invalid operand
+#CHECK: mvcos 0(%r1), 4096(%r15), %r2
+
+ mvcos 160(%r1,%r15), 160(%r15), %r2
+ mvcos -1(%r1), 160(%r15), %r2
+ mvcos 4096(%r1), 160(%r15), %r2
+ mvcos 0(%r1), -1(%r15), %r2
+ mvcos 0(%r1), 4096(%r15), %r2
+
+#CHECK: error: invalid use of length addressing
+#CHECK: mvcp 0(%r1,%r1), 0(2,%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcp -1(%r1,%r1), 0(%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcp 4096(%r1,%r1), 0(%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcp 0(%r1,%r1), -1(%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcp 0(%r1,%r1), 4096(%r1), %r3
+#CHECK: error: %r0 used in an address
+#CHECK: mvcp 0(%r1,%r0), 0(%r1), %r3
+#CHECK: error: %r0 used in an address
+#CHECK: mvcp 0(%r1,%r1), 0(%r0), %r3
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcp 0(%r1,%r2), 0(%r1,%r2), %r3
+#CHECK: error: unknown token in expression
+#CHECK: mvcp 0(-), 0, %r3
+
+ mvcp 0(%r1,%r1), 0(2,%r1), %r3
+ mvcp -1(%r1,%r1), 0(%r1), %r3
+ mvcp 4096(%r1,%r1), 0(%r1), %r3
+ mvcp 0(%r1,%r1), -1(%r1), %r3
+ mvcp 0(%r1,%r1), 4096(%r1), %r3
+ mvcp 0(%r1,%r0), 0(%r1), %r3
+ mvcp 0(%r1,%r1), 0(%r0), %r3
+ mvcp 0(%r1,%r2), 0(%r1,%r2), %r3
+ mvcp 0(-), 0, %r3
+
+#CHECK: error: invalid use of length addressing
+#CHECK: mvcs 0(%r1,%r1), 0(2,%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcs -1(%r1,%r1), 0(%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcs 4096(%r1,%r1), 0(%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcs 0(%r1,%r1), -1(%r1), %r3
+#CHECK: error: invalid operand
+#CHECK: mvcs 0(%r1,%r1), 4096(%r1), %r3
+#CHECK: error: %r0 used in an address
+#CHECK: mvcs 0(%r1,%r0), 0(%r1), %r3
+#CHECK: error: %r0 used in an address
+#CHECK: mvcs 0(%r1,%r1), 0(%r0), %r3
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcs 0(%r1,%r2), 0(%r1,%r2), %r3
+#CHECK: error: unknown token in expression
+#CHECK: mvcs 0(-), 0, %r3
+
+ mvcs 0(%r1,%r1), 0(2,%r1), %r3
+ mvcs -1(%r1,%r1), 0(%r1), %r3
+ mvcs 4096(%r1,%r1), 0(%r1), %r3
+ mvcs 0(%r1,%r1), -1(%r1), %r3
+ mvcs 0(%r1,%r1), 4096(%r1), %r3
+ mvcs 0(%r1,%r0), 0(%r1), %r3
+ mvcs 0(%r1,%r1), 0(%r0), %r3
+ mvcs 0(%r1,%r2), 0(%r1,%r2), %r3
+ mvcs 0(-), 0, %r3
+
+#CHECK: error: invalid use of indexed addressing
+#CHECK: mvcsk 160(%r1,%r15),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcsk -1(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcsk 4096(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcsk 0(%r1),-1(%r15)
+#CHECK: error: invalid operand
+#CHECK: mvcsk 0(%r1),4096(%r15)
+
+ mvcsk 160(%r1,%r15),160(%r15)
+ mvcsk -1(%r1),160(%r15)
+ mvcsk 4096(%r1),160(%r15)
+ mvcsk 0(%r1),-1(%r15)
+ mvcsk 0(%r1),4096(%r15)
+
#CHECK: error: invalid operand
#CHECK: mvghi -1, 0
#CHECK: error: invalid operand
@@ -5008,11 +5362,27 @@
pack 0(1,%r2), 0(%r1,%r2)
pack 0(-), 0(1)
+#CHECK: error: invalid operand
+#CHECK: pc -1
+#CHECK: error: invalid operand
+#CHECK: pc 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: pc 0(%r1,%r2)
+
+ pc -1
+ pc 4096
+ pc 0(%r1,%r2)
+
#CHECK: error: instruction requires: message-security-assist-extension4
#CHECK: pcc
pcc
+#CHECK: error: instruction requires: message-security-assist-extension3
+#CHECK: pckmo
+
+ pckmo
+
#CHECK: error: invalid operand
#CHECK: pfd -1, 0
#CHECK: error: invalid operand
@@ -5187,6 +5557,28 @@
qaxtr %f2, %f0, %f0, 0
#CHECK: error: invalid operand
+#CHECK: qctri -1
+#CHECK: error: invalid operand
+#CHECK: qctri 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: qctri 0(%r1,%r2)
+
+ qctri -1
+ qctri 4096
+ qctri 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: qsi -1
+#CHECK: error: invalid operand
+#CHECK: qsi 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: qsi 0(%r1,%r2)
+
+ qsi -1
+ qsi 4096
+ qsi 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: risbg %r0,%r0,0,0,-1
#CHECK: error: invalid operand
#CHECK: risbg %r0,%r0,0,0,64
@@ -5285,6 +5677,22 @@
rosbg %r0,%r0,256,0,0
#CHECK: error: invalid operand
+#CHECK: rp -1
+#CHECK: error: invalid operand
+#CHECK: rp 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: rp 0(%r1,%r2)
+
+ rp -1
+ rp 4096
+ rp 0(%r1,%r2)
+
+#CHECK: error: instruction requires: reset-reference-bits-multiple
+#CHECK: rrbm %r0, %r0
+
+ rrbm %r0, %r0
+
+#CHECK: error: invalid operand
#CHECK: rrdtr %f0, %f0, %f0, -1
#CHECK: error: invalid operand
#CHECK: rrdtr %f0, %f0, %f0, 16
@@ -5338,6 +5746,50 @@
s %r0, 4096
#CHECK: error: invalid operand
+#CHECK: sac -1
+#CHECK: error: invalid operand
+#CHECK: sac 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sac 0(%r1,%r2)
+
+ sac -1
+ sac 4096
+ sac 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: sacf -1
+#CHECK: error: invalid operand
+#CHECK: sacf 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sacf 0(%r1,%r2)
+
+ sacf -1
+ sacf 4096
+ sacf 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: sck -1
+#CHECK: error: invalid operand
+#CHECK: sck 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sck 0(%r1,%r2)
+
+ sck -1
+ sck 4096
+ sck 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: sckc -1
+#CHECK: error: invalid operand
+#CHECK: sckc 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sckc 0(%r1,%r2)
+
+ sckc -1
+ sckc 4096
+ sckc 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: sd %f0, -1
#CHECK: error: invalid operand
#CHECK: sd %f0, 4096
@@ -5403,6 +5855,16 @@
sh %r0, -1
sh %r0, 4096
+#CHECK: error: instruction requires: high-word
+#CHECK: shhhr %r0, %r0, %r0
+
+ shhhr %r0, %r0, %r0
+
+#CHECK: error: instruction requires: high-word
+#CHECK: shhlr %r0, %r0, %r0
+
+ shhlr %r0, %r0, %r0
+
#CHECK: error: invalid operand
#CHECK: shy %r0, -524289
#CHECK: error: invalid operand
@@ -5412,6 +5874,39 @@
shy %r0, 524288
#CHECK: error: invalid operand
+#CHECK: sie -1
+#CHECK: error: invalid operand
+#CHECK: sie 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sie 0(%r1,%r2)
+
+ sie -1
+ sie 4096
+ sie 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: siga -1
+#CHECK: error: invalid operand
+#CHECK: siga 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: siga 0(%r1,%r2)
+
+ siga -1
+ siga 4096
+ siga 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: sigp %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: sigp %r0, %r0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: sigp %r0, %r0, 0(%r1,%r2)
+
+ sigp %r0, %r0, -1
+ sigp %r0, %r0, 4096
+ sigp %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: sl %r0, -1
#CHECK: error: invalid operand
#CHECK: sl %r0, 4096
@@ -5547,6 +6042,16 @@
slgrk %r2,%r3,%r4
+#CHECK: error: instruction requires: high-word
+#CHECK: slhhhr %r0, %r0, %r0
+
+ slhhhr %r0, %r0, %r0
+
+#CHECK: error: instruction requires: high-word
+#CHECK: slhhlr %r0, %r0, %r0
+
+ slhhlr %r0, %r0, %r0
+
#CHECK: error: invalid operand
#CHECK: sll %r0,-1
#CHECK: error: invalid operand
@@ -5661,6 +6166,39 @@
sp 0(-), 0(1)
#CHECK: error: invalid operand
+#CHECK: spka -1
+#CHECK: error: invalid operand
+#CHECK: spka 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: spka 0(%r1,%r2)
+
+ spka -1
+ spka 4096
+ spka 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: spt -1
+#CHECK: error: invalid operand
+#CHECK: spt 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: spt 0(%r1,%r2)
+
+ spt -1
+ spt 4096
+ spt 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: spx -1
+#CHECK: error: invalid operand
+#CHECK: spx 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: spx 0(%r1,%r2)
+
+ spx -1
+ spx 4096
+ spx 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: sqd %f0, -1
#CHECK: error: invalid operand
#CHECK: sqd %f0, 4096
@@ -5913,6 +6451,36 @@
srxt %f2, %f0, 0
#CHECK: error: invalid operand
+#CHECK: ssch -1
+#CHECK: error: invalid operand
+#CHECK: ssch 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ssch 0(%r1,%r2)
+
+ ssch -1
+ ssch 4096
+ ssch 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: sske %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: sske %r0, %r0, 16
+
+ sske %r0, %r0, -1
+ sske %r0, %r0, 16
+
+#CHECK: error: invalid operand
+#CHECK: ssm -1
+#CHECK: error: invalid operand
+#CHECK: ssm 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: ssm 0(%r1,%r2)
+
+ ssm -1
+ ssm 4096
+ ssm 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: st %r0, -1
#CHECK: error: invalid operand
#CHECK: st %r0, 4096
@@ -5940,6 +6508,17 @@
stamy %a0, %a0, 0(%r1,%r2)
#CHECK: error: invalid operand
+#CHECK: stap -1
+#CHECK: error: invalid operand
+#CHECK: stap 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stap 0(%r1,%r2)
+
+ stap -1
+ stap 4096
+ stap 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: stc %r0, -1
#CHECK: error: invalid operand
#CHECK: stc %r0, 4096
@@ -5953,6 +6532,50 @@
stch %r0, 0
#CHECK: error: invalid operand
+#CHECK: stck -1
+#CHECK: error: invalid operand
+#CHECK: stck 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stck 0(%r1,%r2)
+
+ stck -1
+ stck 4096
+ stck 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stckc -1
+#CHECK: error: invalid operand
+#CHECK: stckc 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stckc 0(%r1,%r2)
+
+ stckc -1
+ stckc 4096
+ stckc 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stcke -1
+#CHECK: error: invalid operand
+#CHECK: stcke 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stcke 0(%r1,%r2)
+
+ stcke -1
+ stcke 4096
+ stcke 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stckf -1
+#CHECK: error: invalid operand
+#CHECK: stckf 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stckf 0(%r1,%r2)
+
+ stckf -1
+ stckf 4096
+ stckf 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: stcm %r0, 0, -1
#CHECK: error: invalid operand
#CHECK: stcm %r0, 0, 4096
@@ -5995,6 +6618,50 @@
stcmy %r0, 16, 0
#CHECK: error: invalid operand
+#CHECK: stcps -1
+#CHECK: error: invalid operand
+#CHECK: stcps 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stcps 0(%r1,%r2)
+
+ stcps -1
+ stcps 4096
+ stcps 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stcrw -1
+#CHECK: error: invalid operand
+#CHECK: stcrw 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stcrw 0(%r1,%r2)
+
+ stcrw -1
+ stcrw 4096
+ stcrw 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stctg %c0, %c0, -524289
+#CHECK: error: invalid operand
+#CHECK: stctg %c0, %c0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stctg %c0, %c0, 0(%r1,%r2)
+
+ stctg %c0, %c0, -524289
+ stctg %c0, %c0, 524288
+ stctg %c0, %c0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stctl %c0, %c0, -1
+#CHECK: error: invalid operand
+#CHECK: stctl %c0, %c0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stctl %c0, %c0, 0(%r1,%r2)
+
+ stctl %c0, %c0, -1
+ stctl %c0, %c0, 4096
+ stctl %c0, %c0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: stcy %r0, -524289
#CHECK: error: invalid operand
#CHECK: stcy %r0, 524288
@@ -6040,6 +6707,28 @@
stfh %r0, 0
#CHECK: error: invalid operand
+#CHECK: stfl -1
+#CHECK: error: invalid operand
+#CHECK: stfl 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stfl 0(%r1,%r2)
+
+ stfl -1
+ stfl 4096
+ stfl 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stfle -1
+#CHECK: error: invalid operand
+#CHECK: stfle 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stfle 0(%r1,%r2)
+
+ stfle -1
+ stfle 4096
+ stfle 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: stfpc -1
#CHECK: error: invalid operand
#CHECK: stfpc 4096
@@ -6108,6 +6797,17 @@
sthy %r0, 524288
#CHECK: error: invalid operand
+#CHECK: stidp -1
+#CHECK: error: invalid operand
+#CHECK: stidp 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stidp 0(%r1,%r2)
+
+ stidp -1
+ stidp 4096
+ stidp 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: stm %r0, %r0, 4096
#CHECK: error: invalid use of indexed addressing
#CHECK: stm %r0, %r0, 0(%r1,%r2)
@@ -6148,6 +6848,51 @@
stmy %r0, %r0, 524288
stmy %r0, %r0, 0(%r1,%r2)
+#CHECK: error: invalid operand
+#CHECK: stnsm -1, 0
+#CHECK: error: invalid operand
+#CHECK: stnsm 4096, 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stnsm 0(%r1,%r2), 0
+#CHECK: error: invalid operand
+#CHECK: stnsm 0, -1
+#CHECK: error: invalid operand
+#CHECK: stnsm 0, 256
+
+ stnsm -1, 0
+ stnsm 4096, 0
+ stnsm 0(%r1,%r2), 0
+ stnsm 0, -1
+ stnsm 0, 256
+
+#CHECK: error: invalid operand
+#CHECK: stosm -1, 0
+#CHECK: error: invalid operand
+#CHECK: stosm 4096, 0
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stosm 0(%r1,%r2), 0
+#CHECK: error: invalid operand
+#CHECK: stosm 0, -1
+#CHECK: error: invalid operand
+#CHECK: stosm 0, 256
+
+ stosm -1, 0
+ stosm 4096, 0
+ stosm 0(%r1,%r2), 0
+ stosm 0, -1
+ stosm 0, 256
+
+#CHECK: error: invalid operand
+#CHECK: stpt -1
+#CHECK: error: invalid operand
+#CHECK: stpt 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stpt 0(%r1,%r2)
+
+ stpt -1
+ stpt 4096
+ stpt 0(%r1,%r2)
+
#CHECK: error: invalid register pair
#CHECK: stpq %r1, 0
#CHECK: error: invalid operand
@@ -6159,6 +6904,17 @@
stpq %r0, -524289
stpq %r0, 524288
+#CHECK: error: invalid operand
+#CHECK: stpx -1
+#CHECK: error: invalid operand
+#CHECK: stpx 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stpx 0(%r1,%r2)
+
+ stpx -1
+ stpx 4096
+ stpx 0(%r1,%r2)
+
#CHECK: error: invalid use of indexed addressing
#CHECK: strag 160(%r1,%r15),160(%r15)
#CHECK: error: invalid operand
@@ -6207,6 +6963,28 @@
strvg %r0, 524288
#CHECK: error: invalid operand
+#CHECK: stsch -1
+#CHECK: error: invalid operand
+#CHECK: stsch 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stsch 0(%r1,%r2)
+
+ stsch -1
+ stsch 4096
+ stsch 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: stsi -1
+#CHECK: error: invalid operand
+#CHECK: stsi 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: stsi 0(%r1,%r2)
+
+ stsi -1
+ stsi 4096
+ stsi 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: sty %r0, -524289
#CHECK: error: invalid operand
#CHECK: sty %r0, 524288
@@ -6475,6 +7253,34 @@
tp 0(%r1,%r2)
tp 0(-)
+#CHECK: error: invalid operand
+#CHECK: tpi -1
+#CHECK: error: invalid operand
+#CHECK: tpi 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tpi 0(%r1,%r2)
+
+ tpi -1
+ tpi 4096
+ tpi 0(%r1,%r2)
+
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tprot 160(%r1,%r15),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: tprot -1(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: tprot 4096(%r1),160(%r15)
+#CHECK: error: invalid operand
+#CHECK: tprot 0(%r1),-1(%r15)
+#CHECK: error: invalid operand
+#CHECK: tprot 0(%r1),4096(%r15)
+
+ tprot 160(%r1,%r15),160(%r15)
+ tprot -1(%r1),160(%r15)
+ tprot 4096(%r1),160(%r15)
+ tprot 0(%r1),-1(%r15)
+ tprot 0(%r1),4096(%r15)
+
#CHECK: error: missing length in address
#CHECK: tr 0, 0
#CHECK: error: missing length in address
@@ -6519,6 +7325,39 @@
tr 0(1,%r2), 0(%r1,%r2)
tr 0(-), 0
+#CHECK: error: invalid operand
+#CHECK: trace %r0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: trace %r0, %r0, 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: trace %r0, %r0, 0(%r1,%r2)
+
+ trace %r0, %r0, -1
+ trace %r0, %r0, 4096
+ trace %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: tracg %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: tracg %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tracg %r0, %r0, 0(%r1,%r2)
+
+ tracg %r0, %r0, -524289
+ tracg %r0, %r0, 524288
+ tracg %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: trap4 -1
+#CHECK: error: invalid operand
+#CHECK: trap4 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: trap4 0(%r1,%r2)
+
+ trap4 -1
+ trap4 4096
+ trap4 0(%r1,%r2)
+
#CHECK: error: invalid register pair
#CHECK: tre %r1, %r0
@@ -6689,6 +7528,17 @@
ts 4096
ts 0(%r1,%r2)
+#CHECK: error: invalid operand
+#CHECK: tsch -1
+#CHECK: error: invalid operand
+#CHECK: tsch 4096
+#CHECK: error: invalid use of indexed addressing
+#CHECK: tsch 0(%r1,%r2)
+
+ tsch -1
+ tsch 4096
+ tsch 0(%r1,%r2)
+
#CHECK: error: missing length in address
#CHECK: unpk 0, 0(1)
#CHECK: error: missing length in address
diff --git a/test/MC/SystemZ/insn-good-z196.s b/test/MC/SystemZ/insn-good-z196.s
index 31d257d7448d2..a3a6628570486 100644
--- a/test/MC/SystemZ/insn-good-z196.s
+++ b/test/MC/SystemZ/insn-good-z196.s
@@ -46,6 +46,30 @@
agrk %r15,%r0,%r0
agrk %r7,%r8,%r9
+#CHECK: ahhhr %r0, %r0, %r0 # encoding: [0xb9,0xc8,0x00,0x00]
+#CHECK: ahhhr %r0, %r0, %r15 # encoding: [0xb9,0xc8,0xf0,0x00]
+#CHECK: ahhhr %r0, %r15, %r0 # encoding: [0xb9,0xc8,0x00,0x0f]
+#CHECK: ahhhr %r15, %r0, %r0 # encoding: [0xb9,0xc8,0x00,0xf0]
+#CHECK: ahhhr %r7, %r8, %r9 # encoding: [0xb9,0xc8,0x90,0x78]
+
+ ahhhr %r0, %r0, %r0
+ ahhhr %r0, %r0, %r15
+ ahhhr %r0, %r15, %r0
+ ahhhr %r15, %r0, %r0
+ ahhhr %r7, %r8, %r9
+
+#CHECK: ahhlr %r0, %r0, %r0 # encoding: [0xb9,0xd8,0x00,0x00]
+#CHECK: ahhlr %r0, %r0, %r15 # encoding: [0xb9,0xd8,0xf0,0x00]
+#CHECK: ahhlr %r0, %r15, %r0 # encoding: [0xb9,0xd8,0x00,0x0f]
+#CHECK: ahhlr %r15, %r0, %r0 # encoding: [0xb9,0xd8,0x00,0xf0]
+#CHECK: ahhlr %r7, %r8, %r9 # encoding: [0xb9,0xd8,0x90,0x78]
+
+ ahhlr %r0, %r0, %r0
+ ahhlr %r0, %r0, %r15
+ ahhlr %r0, %r15, %r0
+ ahhlr %r15, %r0, %r0
+ ahhlr %r7, %r8, %r9
+
#CHECK: ahik %r0, %r0, -32768 # encoding: [0xec,0x00,0x80,0x00,0x00,0xd8]
#CHECK: ahik %r0, %r0, -1 # encoding: [0xec,0x00,0xff,0xff,0x00,0xd8]
#CHECK: ahik %r0, %r0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0xd8]
@@ -108,6 +132,30 @@
algrk %r15,%r0,%r0
algrk %r7,%r8,%r9
+#CHECK: alhhhr %r0, %r0, %r0 # encoding: [0xb9,0xca,0x00,0x00]
+#CHECK: alhhhr %r0, %r0, %r15 # encoding: [0xb9,0xca,0xf0,0x00]
+#CHECK: alhhhr %r0, %r15, %r0 # encoding: [0xb9,0xca,0x00,0x0f]
+#CHECK: alhhhr %r15, %r0, %r0 # encoding: [0xb9,0xca,0x00,0xf0]
+#CHECK: alhhhr %r7, %r8, %r9 # encoding: [0xb9,0xca,0x90,0x78]
+
+ alhhhr %r0, %r0, %r0
+ alhhhr %r0, %r0, %r15
+ alhhhr %r0, %r15, %r0
+ alhhhr %r15, %r0, %r0
+ alhhhr %r7, %r8, %r9
+
+#CHECK: alhhlr %r0, %r0, %r0 # encoding: [0xb9,0xda,0x00,0x00]
+#CHECK: alhhlr %r0, %r0, %r15 # encoding: [0xb9,0xda,0xf0,0x00]
+#CHECK: alhhlr %r0, %r15, %r0 # encoding: [0xb9,0xda,0x00,0x0f]
+#CHECK: alhhlr %r15, %r0, %r0 # encoding: [0xb9,0xda,0x00,0xf0]
+#CHECK: alhhlr %r7, %r8, %r9 # encoding: [0xb9,0xda,0x90,0x78]
+
+ alhhlr %r0, %r0, %r0
+ alhhlr %r0, %r0, %r15
+ alhhlr %r0, %r15, %r0
+ alhhlr %r15, %r0, %r0
+ alhhlr %r7, %r8, %r9
+
#CHECK: alhsik %r0, %r0, -32768 # encoding: [0xec,0x00,0x80,0x00,0x00,0xda]
#CHECK: alhsik %r0, %r0, -1 # encoding: [0xec,0x00,0xff,0xff,0x00,0xda]
#CHECK: alhsik %r0, %r0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0xda]
@@ -138,6 +186,34 @@
alrk %r15,%r0,%r0
alrk %r7,%r8,%r9
+#CHECK: alsih %r0, -2147483648 # encoding: [0xcc,0x0a,0x80,0x00,0x00,0x00]
+#CHECK: alsih %r0, -1 # encoding: [0xcc,0x0a,0xff,0xff,0xff,0xff]
+#CHECK: alsih %r0, 0 # encoding: [0xcc,0x0a,0x00,0x00,0x00,0x00]
+#CHECK: alsih %r0, 1 # encoding: [0xcc,0x0a,0x00,0x00,0x00,0x01]
+#CHECK: alsih %r0, 2147483647 # encoding: [0xcc,0x0a,0x7f,0xff,0xff,0xff]
+#CHECK: alsih %r15, 0 # encoding: [0xcc,0xfa,0x00,0x00,0x00,0x00]
+
+ alsih %r0, -1 << 31
+ alsih %r0, -1
+ alsih %r0, 0
+ alsih %r0, 1
+ alsih %r0, (1 << 31) - 1
+ alsih %r15, 0
+
+#CHECK: alsihn %r0, -2147483648 # encoding: [0xcc,0x0b,0x80,0x00,0x00,0x00]
+#CHECK: alsihn %r0, -1 # encoding: [0xcc,0x0b,0xff,0xff,0xff,0xff]
+#CHECK: alsihn %r0, 0 # encoding: [0xcc,0x0b,0x00,0x00,0x00,0x00]
+#CHECK: alsihn %r0, 1 # encoding: [0xcc,0x0b,0x00,0x00,0x00,0x01]
+#CHECK: alsihn %r0, 2147483647 # encoding: [0xcc,0x0b,0x7f,0xff,0xff,0xff]
+#CHECK: alsihn %r15, 0 # encoding: [0xcc,0xfb,0x00,0x00,0x00,0x00]
+
+ alsihn %r0, -1 << 31
+ alsihn %r0, -1
+ alsihn %r0, 0
+ alsihn %r0, 1
+ alsihn %r0, (1 << 31) - 1
+ alsihn %r15, 0
+
#CHECK: ark %r0, %r0, %r0 # encoding: [0xb9,0xf8,0x00,0x00]
#CHECK: ark %r0, %r0, %r15 # encoding: [0xb9,0xf8,0xf0,0x00]
#CHECK: ark %r0, %r15, %r0 # encoding: [0xb9,0xf8,0x00,0x0f]
@@ -531,6 +607,26 @@
chf %r0, 524287(%r15,%r1)
chf %r15, 0
+#CHECK: chhr %r0, %r0 # encoding: [0xb9,0xcd,0x00,0x00]
+#CHECK: chhr %r0, %r15 # encoding: [0xb9,0xcd,0x00,0x0f]
+#CHECK: chhr %r15, %r0 # encoding: [0xb9,0xcd,0x00,0xf0]
+#CHECK: chhr %r7, %r8 # encoding: [0xb9,0xcd,0x00,0x78]
+
+ chhr %r0,%r0
+ chhr %r0,%r15
+ chhr %r15,%r0
+ chhr %r7,%r8
+
+#CHECK: chlr %r0, %r0 # encoding: [0xb9,0xdd,0x00,0x00]
+#CHECK: chlr %r0, %r15 # encoding: [0xb9,0xdd,0x00,0x0f]
+#CHECK: chlr %r15, %r0 # encoding: [0xb9,0xdd,0x00,0xf0]
+#CHECK: chlr %r7, %r8 # encoding: [0xb9,0xdd,0x00,0x78]
+
+ chlr %r0,%r0
+ chlr %r0,%r15
+ chlr %r15,%r0
+ chlr %r7,%r8
+
#CHECK: cih %r0, -2147483648 # encoding: [0xcc,0x0d,0x80,0x00,0x00,0x00]
#CHECK: cih %r0, -1 # encoding: [0xcc,0x0d,0xff,0xff,0xff,0xff]
#CHECK: cih %r0, 0 # encoding: [0xcc,0x0d,0x00,0x00,0x00,0x00]
@@ -707,6 +803,26 @@
clhf %r0, 524287(%r15,%r1)
clhf %r15, 0
+#CHECK: clhhr %r0, %r0 # encoding: [0xb9,0xcf,0x00,0x00]
+#CHECK: clhhr %r0, %r15 # encoding: [0xb9,0xcf,0x00,0x0f]
+#CHECK: clhhr %r15, %r0 # encoding: [0xb9,0xcf,0x00,0xf0]
+#CHECK: clhhr %r7, %r8 # encoding: [0xb9,0xcf,0x00,0x78]
+
+ clhhr %r0,%r0
+ clhhr %r0,%r15
+ clhhr %r15,%r0
+ clhhr %r7,%r8
+
+#CHECK: clhlr %r0, %r0 # encoding: [0xb9,0xdf,0x00,0x00]
+#CHECK: clhlr %r0, %r15 # encoding: [0xb9,0xdf,0x00,0x0f]
+#CHECK: clhlr %r15, %r0 # encoding: [0xb9,0xdf,0x00,0xf0]
+#CHECK: clhlr %r7, %r8 # encoding: [0xb9,0xdf,0x00,0x78]
+
+ clhlr %r0,%r0
+ clhlr %r0,%r15
+ clhlr %r15,%r0
+ clhlr %r7,%r8
+
#CHECK: clih %r0, 0 # encoding: [0xcc,0x0f,0x00,0x00,0x00,0x00]
#CHECK: clih %r0, 1 # encoding: [0xcc,0x0f,0x00,0x00,0x00,0x01]
#CHECK: clih %r0, 4294967295 # encoding: [0xcc,0x0f,0xff,0xff,0xff,0xff]
@@ -1645,6 +1761,10 @@
pcc
+#CHECK: pckmo # encoding: [0xb9,0x28,0x00,0x00]
+
+ pckmo
+
#CHECK: popcnt %r0, %r0 # encoding: [0xb9,0xe1,0x00,0x00]
#CHECK: popcnt %r0, %r15 # encoding: [0xb9,0xe1,0x00,0x0f]
#CHECK: popcnt %r15, %r0 # encoding: [0xb9,0xe1,0x00,0xf0]
@@ -1687,6 +1807,18 @@
risblg %r15,%r0,0,0,0
risblg %r4,%r5,6,7,8
+#CHECK: rrbm %r0, %r0 # encoding: [0xb9,0xae,0x00,0x00]
+#CHECK: rrbm %r0, %r15 # encoding: [0xb9,0xae,0x00,0x0f]
+#CHECK: rrbm %r15, %r0 # encoding: [0xb9,0xae,0x00,0xf0]
+#CHECK: rrbm %r7, %r8 # encoding: [0xb9,0xae,0x00,0x78]
+#CHECK: rrbm %r15, %r15 # encoding: [0xb9,0xae,0x00,0xff]
+
+ rrbm %r0,%r0
+ rrbm %r0,%r15
+ rrbm %r15,%r0
+ rrbm %r7,%r8
+ rrbm %r15,%r15
+
#CHECK: sdtra %f0, %f0, %f0, 0 # encoding: [0xb3,0xd3,0x00,0x00]
#CHECK: sdtra %f0, %f0, %f0, 15 # encoding: [0xb3,0xd3,0x0f,0x00]
#CHECK: sdtra %f0, %f0, %f15, 0 # encoding: [0xb3,0xd3,0xf0,0x00]
@@ -1713,6 +1845,30 @@
sgrk %r15,%r0,%r0
sgrk %r7,%r8,%r9
+#CHECK: shhhr %r0, %r0, %r0 # encoding: [0xb9,0xc9,0x00,0x00]
+#CHECK: shhhr %r0, %r0, %r15 # encoding: [0xb9,0xc9,0xf0,0x00]
+#CHECK: shhhr %r0, %r15, %r0 # encoding: [0xb9,0xc9,0x00,0x0f]
+#CHECK: shhhr %r15, %r0, %r0 # encoding: [0xb9,0xc9,0x00,0xf0]
+#CHECK: shhhr %r7, %r8, %r9 # encoding: [0xb9,0xc9,0x90,0x78]
+
+ shhhr %r0, %r0, %r0
+ shhhr %r0, %r0, %r15
+ shhhr %r0, %r15, %r0
+ shhhr %r15, %r0, %r0
+ shhhr %r7, %r8, %r9
+
+#CHECK: shhlr %r0, %r0, %r0 # encoding: [0xb9,0xd9,0x00,0x00]
+#CHECK: shhlr %r0, %r0, %r15 # encoding: [0xb9,0xd9,0xf0,0x00]
+#CHECK: shhlr %r0, %r15, %r0 # encoding: [0xb9,0xd9,0x00,0x0f]
+#CHECK: shhlr %r15, %r0, %r0 # encoding: [0xb9,0xd9,0x00,0xf0]
+#CHECK: shhlr %r7, %r8, %r9 # encoding: [0xb9,0xd9,0x90,0x78]
+
+ shhlr %r0, %r0, %r0
+ shhlr %r0, %r0, %r15
+ shhlr %r0, %r15, %r0
+ shhlr %r15, %r0, %r0
+ shhlr %r7, %r8, %r9
+
#CHECK: slak %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xdd]
#CHECK: slak %r15, %r1, 0 # encoding: [0xeb,0xf1,0x00,0x00,0x00,0xdd]
#CHECK: slak %r1, %r15, 0 # encoding: [0xeb,0x1f,0x00,0x00,0x00,0xdd]
@@ -1751,6 +1907,30 @@
slgrk %r15,%r0,%r0
slgrk %r7,%r8,%r9
+#CHECK: slhhhr %r0, %r0, %r0 # encoding: [0xb9,0xcb,0x00,0x00]
+#CHECK: slhhhr %r0, %r0, %r15 # encoding: [0xb9,0xcb,0xf0,0x00]
+#CHECK: slhhhr %r0, %r15, %r0 # encoding: [0xb9,0xcb,0x00,0x0f]
+#CHECK: slhhhr %r15, %r0, %r0 # encoding: [0xb9,0xcb,0x00,0xf0]
+#CHECK: slhhhr %r7, %r8, %r9 # encoding: [0xb9,0xcb,0x90,0x78]
+
+ slhhhr %r0, %r0, %r0
+ slhhhr %r0, %r0, %r15
+ slhhhr %r0, %r15, %r0
+ slhhhr %r15, %r0, %r0
+ slhhhr %r7, %r8, %r9
+
+#CHECK: slhhlr %r0, %r0, %r0 # encoding: [0xb9,0xdb,0x00,0x00]
+#CHECK: slhhlr %r0, %r0, %r15 # encoding: [0xb9,0xdb,0xf0,0x00]
+#CHECK: slhhlr %r0, %r15, %r0 # encoding: [0xb9,0xdb,0x00,0x0f]
+#CHECK: slhhlr %r15, %r0, %r0 # encoding: [0xb9,0xdb,0x00,0xf0]
+#CHECK: slhhlr %r7, %r8, %r9 # encoding: [0xb9,0xdb,0x90,0x78]
+
+ slhhlr %r0, %r0, %r0
+ slhhlr %r0, %r0, %r15
+ slhhlr %r0, %r15, %r0
+ slhhlr %r15, %r0, %r0
+ slhhlr %r7, %r8, %r9
+
#CHECK: sllk %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xdf]
#CHECK: sllk %r15, %r1, 0 # encoding: [0xeb,0xf1,0x00,0x00,0x00,0xdf]
#CHECK: sllk %r1, %r15, 0 # encoding: [0xeb,0x1f,0x00,0x00,0x00,0xdf]
diff --git a/test/MC/SystemZ/insn-good-zEC12.s b/test/MC/SystemZ/insn-good-zEC12.s
index 2fe6c46ad908b..1f1bfb883bddb 100644
--- a/test/MC/SystemZ/insn-good-zEC12.s
+++ b/test/MC/SystemZ/insn-good-zEC12.s
@@ -198,6 +198,20 @@
clgtnl %r0, 0(%r15)
clgtnh %r0, 0(%r15)
+#CHECK: crdte %r0, %r0, %r0 # encoding: [0xb9,0x8f,0x00,0x00]
+#CHECK: crdte %r0, %r0, %r14 # encoding: [0xb9,0x8f,0x00,0x0e]
+#CHECK: crdte %r0, %r15, %r0 # encoding: [0xb9,0x8f,0xf0,0x00]
+#CHECK: crdte %r14, %r0, %r0 # encoding: [0xb9,0x8f,0x00,0xe0]
+#CHECK: crdte %r0, %r0, %r0, 15 # encoding: [0xb9,0x8f,0x0f,0x00]
+#CHECK: crdte %r4, %r5, %r6, 7 # encoding: [0xb9,0x8f,0x57,0x46]
+
+ crdte %r0, %r0, %r0
+ crdte %r0, %r0, %r14
+ crdte %r0, %r15, %r0
+ crdte %r14, %r0, %r0
+ crdte %r0, %r0, %r0, 15
+ crdte %r4, %r5, %r6, 7
+
#CHECK: cxzt %f0, 0(1), 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0xab]
#CHECK: cxzt %f13, 0(1), 0 # encoding: [0xed,0x00,0x00,0x00,0xd0,0xab]
#CHECK: cxzt %f0, 0(1), 15 # encoding: [0xed,0x00,0x00,0x00,0x0f,0xab]
diff --git a/test/MC/SystemZ/insn-good.s b/test/MC/SystemZ/insn-good.s
index 73162e4eea712..5b93ef917fd32 100644
--- a/test/MC/SystemZ/insn-good.s
+++ b/test/MC/SystemZ/insn-good.s
@@ -741,6 +741,16 @@
ay %r0, 524287(%r15,%r1)
ay %r15, 0
+#CHECK: bakr %r0, %r0 # encoding: [0xb2,0x40,0x00,0x00]
+#CHECK: bakr %r0, %r15 # encoding: [0xb2,0x40,0x00,0x0f]
+#CHECK: bakr %r15, %r0 # encoding: [0xb2,0x40,0x00,0xf0]
+#CHECK: bakr %r7, %r8 # encoding: [0xb2,0x40,0x00,0x78]
+
+ bakr %r0,%r0
+ bakr %r0,%r15
+ bakr %r15,%r0
+ bakr %r7,%r8
+
#CHECK: bal %r0, 0 # encoding: [0x45,0x00,0x00,0x00]
#CHECK: bal %r1, 4095 # encoding: [0x45,0x10,0x0f,0xff]
#CHECK: bal %r2, 0(%r1) # encoding: [0x45,0x20,0x10,0x00]
@@ -799,6 +809,26 @@
bassm %r14,%r9
bassm %r15,%r1
+#CHECK: bsa %r0, %r0 # encoding: [0xb2,0x5a,0x00,0x00]
+#CHECK: bsa %r0, %r15 # encoding: [0xb2,0x5a,0x00,0x0f]
+#CHECK: bsa %r15, %r0 # encoding: [0xb2,0x5a,0x00,0xf0]
+#CHECK: bsa %r7, %r8 # encoding: [0xb2,0x5a,0x00,0x78]
+
+ bsa %r0,%r0
+ bsa %r0,%r15
+ bsa %r15,%r0
+ bsa %r7,%r8
+
+#CHECK: bsg %r0, %r0 # encoding: [0xb2,0x58,0x00,0x00]
+#CHECK: bsg %r0, %r15 # encoding: [0xb2,0x58,0x00,0x0f]
+#CHECK: bsg %r15, %r0 # encoding: [0xb2,0x58,0x00,0xf0]
+#CHECK: bsg %r7, %r8 # encoding: [0xb2,0x58,0x00,0x78]
+
+ bsg %r0,%r0
+ bsg %r0,%r15
+ bsg %r15,%r0
+ bsg %r7,%r8
+
#CHECK: bsm %r0, %r1 # encoding: [0x0b,0x01]
#CHECK: bsm %r0, %r15 # encoding: [0x0b,0x0f]
#CHECK: bsm %r14, %r9 # encoding: [0x0b,0xe9]
@@ -6257,6 +6287,10 @@
cs %r0, %r15, 0
cs %r15, %r0, 0
+#CHECK: csch # encoding: [0xb2,0x30,0x00,0x00]
+
+ csch
+
#CHECK: csdtr %r0, %f0, 0 # encoding: [0xb3,0xe3,0x00,0x00]
#CHECK: csdtr %r0, %f15, 0 # encoding: [0xb3,0xe3,0x00,0x0f]
#CHECK: csdtr %r0, %f0, 15 # encoding: [0xb3,0xe3,0x0f,0x00]
@@ -6293,6 +6327,26 @@
csg %r0, %r15, 0
csg %r15, %r0, 0
+#CHECK: csp %r0, %r0 # encoding: [0xb2,0x50,0x00,0x00]
+#CHECK: csp %r0, %r15 # encoding: [0xb2,0x50,0x00,0x0f]
+#CHECK: csp %r14, %r0 # encoding: [0xb2,0x50,0x00,0xe0]
+#CHECK: csp %r6, %r8 # encoding: [0xb2,0x50,0x00,0x68]
+
+ csp %r0,%r0
+ csp %r0,%r15
+ csp %r14,%r0
+ csp %r6,%r8
+
+#CHECK: cspg %r0, %r0 # encoding: [0xb9,0x8a,0x00,0x00]
+#CHECK: cspg %r0, %r15 # encoding: [0xb9,0x8a,0x00,0x0f]
+#CHECK: cspg %r14, %r0 # encoding: [0xb9,0x8a,0x00,0xe0]
+#CHECK: cspg %r6, %r8 # encoding: [0xb9,0x8a,0x00,0x68]
+
+ cspg %r0,%r0
+ cspg %r0,%r15
+ cspg %r14,%r0
+ cspg %r6,%r8
+
#CHECK: csst 0, 0, %r0 # encoding: [0xc8,0x02,0x00,0x00,0x00,0x00]
#CHECK: csst 0(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x00,0xf0,0x00]
#CHECK: csst 1(%r1), 0(%r15), %r2 # encoding: [0xc8,0x22,0x10,0x01,0xf0,0x00]
@@ -6869,6 +6923,28 @@
der %f7, %f8
der %f15, %f0
+#CHECK: diag %r0, %r0, 0 # encoding: [0x83,0x00,0x00,0x00]
+#CHECK: diag %r0, %r15, 0 # encoding: [0x83,0x0f,0x00,0x00]
+#CHECK: diag %r14, %r15, 0 # encoding: [0x83,0xef,0x00,0x00]
+#CHECK: diag %r15, %r15, 0 # encoding: [0x83,0xff,0x00,0x00]
+#CHECK: diag %r0, %r0, 4095 # encoding: [0x83,0x00,0x0f,0xff]
+#CHECK: diag %r0, %r0, 1 # encoding: [0x83,0x00,0x00,0x01]
+#CHECK: diag %r0, %r0, 0(%r1) # encoding: [0x83,0x00,0x10,0x00]
+#CHECK: diag %r0, %r0, 0(%r15) # encoding: [0x83,0x00,0xf0,0x00]
+#CHECK: diag %r0, %r0, 4095(%r1) # encoding: [0x83,0x00,0x1f,0xff]
+#CHECK: diag %r0, %r0, 4095(%r15) # encoding: [0x83,0x00,0xff,0xff]
+
+ diag %r0,%r0,0
+ diag %r0,%r15,0
+ diag %r14,%r15,0
+ diag %r15,%r15,0
+ diag %r0,%r0,4095
+ diag %r0,%r0,1
+ diag %r0,%r0,0(%r1)
+ diag %r0,%r0,0(%r15)
+ diag %r0,%r0,4095(%r1)
+ diag %r0,%r0,4095(%r15)
+
#CHECK: didbr %f0, %f0, %f0, 0 # encoding: [0xb3,0x5b,0x00,0x00]
#CHECK: didbr %f0, %f0, %f0, 15 # encoding: [0xb3,0x5b,0x0f,0x00]
#CHECK: didbr %f0, %f0, %f15, 0 # encoding: [0xb3,0x5b,0x00,0x0f]
@@ -7137,6 +7213,26 @@
ecag %r0,%r0,524287(%r1)
ecag %r0,%r0,524287(%r15)
+#CHECK: ecctr %r0, %r0 # encoding: [0xb2,0xe4,0x00,0x00]
+#CHECK: ecctr %r0, %r15 # encoding: [0xb2,0xe4,0x00,0x0f]
+#CHECK: ecctr %r15, %r0 # encoding: [0xb2,0xe4,0x00,0xf0]
+#CHECK: ecctr %r7, %r8 # encoding: [0xb2,0xe4,0x00,0x78]
+
+ ecctr %r0,%r0
+ ecctr %r0,%r15
+ ecctr %r15,%r0
+ ecctr %r7,%r8
+
+#CHECK: ecpga %r0, %r0 # encoding: [0xb2,0xed,0x00,0x00]
+#CHECK: ecpga %r0, %r15 # encoding: [0xb2,0xed,0x00,0x0f]
+#CHECK: ecpga %r15, %r0 # encoding: [0xb2,0xed,0x00,0xf0]
+#CHECK: ecpga %r7, %r8 # encoding: [0xb2,0xed,0x00,0x78]
+
+ ecpga %r0,%r0
+ ecpga %r0,%r15
+ ecpga %r15,%r0
+ ecpga %r7,%r8
+
#CHECK: ectg 0, 0, %r0 # encoding: [0xc8,0x01,0x00,0x00,0x00,0x00]
#CHECK: ectg 0(%r1), 0(%r15), %r2 # encoding: [0xc8,0x21,0x10,0x00,0xf0,0x00]
#CHECK: ectg 1(%r1), 0(%r15), %r2 # encoding: [0xc8,0x21,0x10,0x01,0xf0,0x00]
@@ -7231,6 +7327,32 @@
efpc %r1
efpc %r15
+#CHECK: epar %r0 # encoding: [0xb2,0x26,0x00,0x00]
+#CHECK: epar %r1 # encoding: [0xb2,0x26,0x00,0x10]
+#CHECK: epar %r15 # encoding: [0xb2,0x26,0x00,0xf0]
+
+ epar %r0
+ epar %r1
+ epar %r15
+
+#CHECK: epair %r0 # encoding: [0xb9,0x9a,0x00,0x00]
+#CHECK: epair %r1 # encoding: [0xb9,0x9a,0x00,0x10]
+#CHECK: epair %r15 # encoding: [0xb9,0x9a,0x00,0xf0]
+
+ epair %r0
+ epair %r1
+ epair %r15
+
+#CHECK: epctr %r0, %r0 # encoding: [0xb2,0xe5,0x00,0x00]
+#CHECK: epctr %r0, %r15 # encoding: [0xb2,0xe5,0x00,0x0f]
+#CHECK: epctr %r15, %r0 # encoding: [0xb2,0xe5,0x00,0xf0]
+#CHECK: epctr %r7, %r8 # encoding: [0xb2,0xe5,0x00,0x78]
+
+ epctr %r0,%r0
+ epctr %r0,%r15
+ epctr %r15,%r0
+ epctr %r7,%r8
+
#CHECK: epsw %r0, %r8 # encoding: [0xb9,0x8d,0x00,0x08]
#CHECK: epsw %r0, %r15 # encoding: [0xb9,0x8d,0x00,0x0f]
#CHECK: epsw %r15, %r0 # encoding: [0xb9,0x8d,0x00,0xf0]
@@ -7241,6 +7363,42 @@
epsw %r15, %r0
epsw %r15, %r8
+#CHECK: ereg %r0, %r0 # encoding: [0xb2,0x49,0x00,0x00]
+#CHECK: ereg %r0, %r15 # encoding: [0xb2,0x49,0x00,0x0f]
+#CHECK: ereg %r15, %r0 # encoding: [0xb2,0x49,0x00,0xf0]
+#CHECK: ereg %r7, %r8 # encoding: [0xb2,0x49,0x00,0x78]
+
+ ereg %r0,%r0
+ ereg %r0,%r15
+ ereg %r15,%r0
+ ereg %r7,%r8
+
+#CHECK: eregg %r0, %r0 # encoding: [0xb9,0x0e,0x00,0x00]
+#CHECK: eregg %r0, %r15 # encoding: [0xb9,0x0e,0x00,0x0f]
+#CHECK: eregg %r15, %r0 # encoding: [0xb9,0x0e,0x00,0xf0]
+#CHECK: eregg %r7, %r8 # encoding: [0xb9,0x0e,0x00,0x78]
+
+ eregg %r0,%r0
+ eregg %r0,%r15
+ eregg %r15,%r0
+ eregg %r7,%r8
+
+#CHECK: esar %r0 # encoding: [0xb2,0x27,0x00,0x00]
+#CHECK: esar %r1 # encoding: [0xb2,0x27,0x00,0x10]
+#CHECK: esar %r15 # encoding: [0xb2,0x27,0x00,0xf0]
+
+ esar %r0
+ esar %r1
+ esar %r15
+
+#CHECK: esair %r0 # encoding: [0xb9,0x9b,0x00,0x00]
+#CHECK: esair %r1 # encoding: [0xb9,0x9b,0x00,0x10]
+#CHECK: esair %r15 # encoding: [0xb9,0x9b,0x00,0xf0]
+
+ esair %r0
+ esair %r1
+ esair %r15
+
#CHECK: esdtr %f0, %f9 # encoding: [0xb3,0xe7,0x00,0x09]
#CHECK: esdtr %f0, %f15 # encoding: [0xb3,0xe7,0x00,0x0f]
#CHECK: esdtr %f15, %f0 # encoding: [0xb3,0xe7,0x00,0xf0]
@@ -7251,6 +7409,24 @@
esdtr %f15,%f0
esdtr %f15,%f9
+#CHECK: esea %r0 # encoding: [0xb9,0x9d,0x00,0x00]
+#CHECK: esea %r1 # encoding: [0xb9,0x9d,0x00,0x10]
+#CHECK: esea %r15 # encoding: [0xb9,0x9d,0x00,0xf0]
+
+ esea %r0
+ esea %r1
+ esea %r15
+
+#CHECK: esta %r0, %r0 # encoding: [0xb2,0x4a,0x00,0x00]
+#CHECK: esta %r0, %r15 # encoding: [0xb2,0x4a,0x00,0x0f]
+#CHECK: esta %r14, %r0 # encoding: [0xb2,0x4a,0x00,0xe0]
+#CHECK: esta %r6, %r8 # encoding: [0xb2,0x4a,0x00,0x68]
+
+ esta %r0,%r0
+ esta %r0,%r15
+ esta %r14,%r0
+ esta %r6,%r8
+
#CHECK: esxtr %f0, %f8 # encoding: [0xb3,0xef,0x00,0x08]
#CHECK: esxtr %f0, %f13 # encoding: [0xb3,0xef,0x00,0x0d]
#CHECK: esxtr %f13, %f0 # encoding: [0xb3,0xef,0x00,0xd0]
@@ -7438,6 +7614,18 @@
her %f7, %f8
her %f15, %f0
+#CHECK: hsch # encoding: [0xb2,0x31,0x00,0x00]
+
+ hsch
+
+#CHECK: iac %r0 # encoding: [0xb2,0x24,0x00,0x00]
+#CHECK: iac %r1 # encoding: [0xb2,0x24,0x00,0x10]
+#CHECK: iac %r15 # encoding: [0xb2,0x24,0x00,0xf0]
+
+ iac %r0
+ iac %r1
+ iac %r15
+
#CHECK: ic %r0, 0 # encoding: [0x43,0x00,0x00,0x00]
#CHECK: ic %r0, 4095 # encoding: [0x43,0x00,0x0f,0xff]
#CHECK: ic %r0, 0(%r1) # encoding: [0x43,0x00,0x10,0x00]
@@ -7536,6 +7724,20 @@
icy %r0, 524287(%r15,%r1)
icy %r15, 0
+#CHECK: idte %r0, %r0, %r0 # encoding: [0xb9,0x8e,0x00,0x00]
+#CHECK: idte %r0, %r0, %r15 # encoding: [0xb9,0x8e,0x00,0x0f]
+#CHECK: idte %r0, %r15, %r0 # encoding: [0xb9,0x8e,0xf0,0x00]
+#CHECK: idte %r15, %r0, %r0 # encoding: [0xb9,0x8e,0x00,0xf0]
+#CHECK: idte %r0, %r0, %r0, 15 # encoding: [0xb9,0x8e,0x0f,0x00]
+#CHECK: idte %r4, %r5, %r6, 7 # encoding: [0xb9,0x8e,0x57,0x46]
+
+ idte %r0, %r0, %r0
+ idte %r0, %r0, %r15
+ idte %r0, %r15, %r0
+ idte %r15, %r0, %r0
+ idte %r0, %r0, %r0, 15
+ idte %r4, %r5, %r6, 7
+
#CHECK: iedtr %f0, %f0, %f0 # encoding: [0xb3,0xf6,0x00,0x00]
#CHECK: iedtr %f0, %f0, %f15 # encoding: [0xb3,0xf6,0x00,0x0f]
#CHECK: iedtr %f0, %f15, %f0 # encoding: [0xb3,0xf6,0xf0,0x00]
@@ -7620,6 +7822,10 @@
iill %r0, 0xffff
iill %r15, 0
+#CHECK: ipk # encoding: [0xb2,0x0b,0x00,0x00]
+
+ ipk
+
#CHECK: ipm %r0 # encoding: [0xb2,0x22,0x00,0x00]
#CHECK: ipm %r1 # encoding: [0xb2,0x22,0x00,0x10]
#CHECK: ipm %r15 # encoding: [0xb2,0x22,0x00,0xf0]
@@ -7628,6 +7834,40 @@
ipm %r1
ipm %r15
+#CHECK: ipte %r0, %r0 # encoding: [0xb2,0x21,0x00,0x00]
+#CHECK: ipte %r0, %r15 # encoding: [0xb2,0x21,0x00,0x0f]
+#CHECK: ipte %r15, %r0 # encoding: [0xb2,0x21,0x00,0xf0]
+#CHECK: ipte %r0, %r0, %r15 # encoding: [0xb2,0x21,0xf0,0x00]
+#CHECK: ipte %r0, %r0, %r0, 15 # encoding: [0xb2,0x21,0x0f,0x00]
+#CHECK: ipte %r7, %r8, %r9, 10 # encoding: [0xb2,0x21,0x9a,0x78]
+
+ ipte %r0, %r0
+ ipte %r0, %r15
+ ipte %r15, %r0
+ ipte %r0, %r0, %r15
+ ipte %r0, %r0, %r0, 15
+ ipte %r7, %r8, %r9, 10
+
+#CHECK: iske %r0, %r0 # encoding: [0xb2,0x29,0x00,0x00]
+#CHECK: iske %r0, %r15 # encoding: [0xb2,0x29,0x00,0x0f]
+#CHECK: iske %r15, %r0 # encoding: [0xb2,0x29,0x00,0xf0]
+#CHECK: iske %r7, %r8 # encoding: [0xb2,0x29,0x00,0x78]
+
+ iske %r0,%r0
+ iske %r0,%r15
+ iske %r15,%r0
+ iske %r7,%r8
+
+#CHECK: ivsk %r0, %r0 # encoding: [0xb2,0x23,0x00,0x00]
+#CHECK: ivsk %r0, %r15 # encoding: [0xb2,0x23,0x00,0x0f]
+#CHECK: ivsk %r15, %r0 # encoding: [0xb2,0x23,0x00,0xf0]
+#CHECK: ivsk %r7, %r8 # encoding: [0xb2,0x23,0x00,0x78]
+
+ ivsk %r0,%r0
+ ivsk %r0,%r15
+ ivsk %r15,%r0
+ ivsk %r7,%r8
+
#CHECK: kdb %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x18]
#CHECK: kdb %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x18]
#CHECK: kdb %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x18]
@@ -7917,6 +8157,28 @@
larl %r7,frob@PLT
larl %r8,frob@PLT
+#CHECK: lasp 0, 0 # encoding: [0xe5,0x00,0x00,0x00,0x00,0x00]
+#CHECK: lasp 0(%r1), 0(%r2) # encoding: [0xe5,0x00,0x10,0x00,0x20,0x00]
+#CHECK: lasp 160(%r1), 320(%r15) # encoding: [0xe5,0x00,0x10,0xa0,0xf1,0x40]
+#CHECK: lasp 0(%r1), 4095 # encoding: [0xe5,0x00,0x10,0x00,0x0f,0xff]
+#CHECK: lasp 0(%r1), 4095(%r2) # encoding: [0xe5,0x00,0x10,0x00,0x2f,0xff]
+#CHECK: lasp 0(%r1), 4095(%r15) # encoding: [0xe5,0x00,0x10,0x00,0xff,0xff]
+#CHECK: lasp 0(%r1), 0 # encoding: [0xe5,0x00,0x10,0x00,0x00,0x00]
+#CHECK: lasp 0(%r15), 0 # encoding: [0xe5,0x00,0xf0,0x00,0x00,0x00]
+#CHECK: lasp 4095(%r1), 0 # encoding: [0xe5,0x00,0x1f,0xff,0x00,0x00]
+#CHECK: lasp 4095(%r15), 0 # encoding: [0xe5,0x00,0xff,0xff,0x00,0x00]
+
+ lasp 0, 0
+ lasp 0(%r1), 0(%r2)
+ lasp 160(%r1), 320(%r15)
+ lasp 0(%r1), 4095
+ lasp 0(%r1), 4095(%r2)
+ lasp 0(%r1), 4095(%r15)
+ lasp 0(%r1), 0
+ lasp 0(%r15), 0
+ lasp 4095(%r1), 0
+ lasp 4095(%r15), 0
+
#CHECK: lay %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x71]
#CHECK: lay %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x71]
#CHECK: lay %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x71]
@@ -7969,6 +8231,20 @@
lbr %r7, %r8
lbr %r15, %r0
+#CHECK: lcctl 0 # encoding: [0xb2,0x84,0x00,0x00]
+#CHECK: lcctl 0(%r1) # encoding: [0xb2,0x84,0x10,0x00]
+#CHECK: lcctl 0(%r15) # encoding: [0xb2,0x84,0xf0,0x00]
+#CHECK: lcctl 4095 # encoding: [0xb2,0x84,0x0f,0xff]
+#CHECK: lcctl 4095(%r1) # encoding: [0xb2,0x84,0x1f,0xff]
+#CHECK: lcctl 4095(%r15) # encoding: [0xb2,0x84,0xff,0xff]
+
+ lcctl 0
+ lcctl 0(%r1)
+ lcctl 0(%r15)
+ lcctl 4095
+ lcctl 4095(%r1)
+ lcctl 4095(%r15)
+
#CHECK: lcdbr %f0, %f9 # encoding: [0xb3,0x13,0x00,0x09]
#CHECK: lcdbr %f0, %f15 # encoding: [0xb3,0x13,0x00,0x0f]
#CHECK: lcdbr %f15, %f0 # encoding: [0xb3,0x13,0x00,0xf0]
@@ -8039,6 +8315,56 @@
lcr %r15,%r0
lcr %r7,%r8
+#CHECK: lctl %c0, %c0, 0 # encoding: [0xb7,0x00,0x00,0x00]
+#CHECK: lctl %c0, %c15, 0 # encoding: [0xb7,0x0f,0x00,0x00]
+#CHECK: lctl %c14, %c15, 0 # encoding: [0xb7,0xef,0x00,0x00]
+#CHECK: lctl %c15, %c15, 0 # encoding: [0xb7,0xff,0x00,0x00]
+#CHECK: lctl %c0, %c0, 4095 # encoding: [0xb7,0x00,0x0f,0xff]
+#CHECK: lctl %c0, %c0, 1 # encoding: [0xb7,0x00,0x00,0x01]
+#CHECK: lctl %c0, %c0, 0(%r1) # encoding: [0xb7,0x00,0x10,0x00]
+#CHECK: lctl %c0, %c0, 0(%r15) # encoding: [0xb7,0x00,0xf0,0x00]
+#CHECK: lctl %c0, %c0, 4095(%r1) # encoding: [0xb7,0x00,0x1f,0xff]
+#CHECK: lctl %c0, %c0, 4095(%r15) # encoding: [0xb7,0x00,0xff,0xff]
+
+ lctl %c0,%c0,0
+ lctl %c0,%c15,0
+ lctl %c14,%c15,0
+ lctl %c15,%c15,0
+ lctl %c0,%c0,4095
+ lctl %c0,%c0,1
+ lctl %c0,%c0,0(%r1)
+ lctl %c0,%c0,0(%r15)
+ lctl %c0,%c0,4095(%r1)
+ lctl %c0,%c0,4095(%r15)
+
+#CHECK: lctlg %c0, %c0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x2f]
+#CHECK: lctlg %c0, %c15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x2f]
+#CHECK: lctlg %c14, %c15, 0 # encoding: [0xeb,0xef,0x00,0x00,0x00,0x2f]
+#CHECK: lctlg %c15, %c15, 0 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x2f]
+#CHECK: lctlg %c0, %c0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x2f]
+#CHECK: lctlg %c0, %c0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x2f]
+#CHECK: lctlg %c0, %c0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x2f]
+#CHECK: lctlg %c0, %c0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x2f]
+#CHECK: lctlg %c0, %c0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x2f]
+#CHECK: lctlg %c0, %c0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0x2f]
+#CHECK: lctlg %c0, %c0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0x2f]
+#CHECK: lctlg %c0, %c0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x2f]
+#CHECK: lctlg %c0, %c0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0x2f]
+
+ lctlg %c0,%c0,0
+ lctlg %c0,%c15,0
+ lctlg %c14,%c15,0
+ lctlg %c15,%c15,0
+ lctlg %c0,%c0,-524288
+ lctlg %c0,%c0,-1
+ lctlg %c0,%c0,0
+ lctlg %c0,%c0,1
+ lctlg %c0,%c0,524287
+ lctlg %c0,%c0,0(%r1)
+ lctlg %c0,%c0,0(%r15)
+ lctlg %c0,%c0,524287(%r1)
+ lctlg %c0,%c0,524287(%r15)
+
#CHECK: lcxbr %f0, %f8 # encoding: [0xb3,0x43,0x00,0x08]
#CHECK: lcxbr %f0, %f13 # encoding: [0xb3,0x43,0x00,0x0d]
#CHECK: lcxbr %f13, %f0 # encoding: [0xb3,0x43,0x00,0xd0]
@@ -9291,6 +9617,20 @@
lnxr %f13,%f0
lnxr %f13,%f9
+#CHECK: lpctl 0 # encoding: [0xb2,0x85,0x00,0x00]
+#CHECK: lpctl 0(%r1) # encoding: [0xb2,0x85,0x10,0x00]
+#CHECK: lpctl 0(%r15) # encoding: [0xb2,0x85,0xf0,0x00]
+#CHECK: lpctl 4095 # encoding: [0xb2,0x85,0x0f,0xff]
+#CHECK: lpctl 4095(%r1) # encoding: [0xb2,0x85,0x1f,0xff]
+#CHECK: lpctl 4095(%r15) # encoding: [0xb2,0x85,0xff,0xff]
+
+ lpctl 0
+ lpctl 0(%r1)
+ lpctl 0(%r15)
+ lpctl 4095
+ lpctl 4095(%r1)
+ lpctl 4095(%r15)
+
#CHECK: lpdbr %f0, %f9 # encoding: [0xb3,0x10,0x00,0x09]
#CHECK: lpdbr %f0, %f15 # encoding: [0xb3,0x10,0x00,0x0f]
#CHECK: lpdbr %f15, %f0 # encoding: [0xb3,0x10,0x00,0xf0]
@@ -9351,6 +9691,20 @@
lpgr %r15,%r0
lpgr %r7,%r8
+#CHECK: lpp 0 # encoding: [0xb2,0x80,0x00,0x00]
+#CHECK: lpp 0(%r1) # encoding: [0xb2,0x80,0x10,0x00]
+#CHECK: lpp 0(%r15) # encoding: [0xb2,0x80,0xf0,0x00]
+#CHECK: lpp 4095 # encoding: [0xb2,0x80,0x0f,0xff]
+#CHECK: lpp 4095(%r1) # encoding: [0xb2,0x80,0x1f,0xff]
+#CHECK: lpp 4095(%r15) # encoding: [0xb2,0x80,0xff,0xff]
+
+ lpp 0
+ lpp 0(%r1)
+ lpp 0(%r15)
+ lpp 4095
+ lpp 4095(%r1)
+ lpp 4095(%r15)
+
#CHECK: lpq %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x8f]
#CHECK: lpq %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x8f]
#CHECK: lpq %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x8f]
@@ -9383,6 +9737,48 @@
lpr %r15,%r0
lpr %r7,%r8
+#CHECK: lpsw 0 # encoding: [0x82,0x00,0x00,0x00]
+#CHECK: lpsw 0(%r1) # encoding: [0x82,0x00,0x10,0x00]
+#CHECK: lpsw 0(%r15) # encoding: [0x82,0x00,0xf0,0x00]
+#CHECK: lpsw 4095 # encoding: [0x82,0x00,0x0f,0xff]
+#CHECK: lpsw 4095(%r1) # encoding: [0x82,0x00,0x1f,0xff]
+#CHECK: lpsw 4095(%r15) # encoding: [0x82,0x00,0xff,0xff]
+
+ lpsw 0
+ lpsw 0(%r1)
+ lpsw 0(%r15)
+ lpsw 4095
+ lpsw 4095(%r1)
+ lpsw 4095(%r15)
+
+#CHECK: lpswe 0 # encoding: [0xb2,0xb2,0x00,0x00]
+#CHECK: lpswe 0(%r1) # encoding: [0xb2,0xb2,0x10,0x00]
+#CHECK: lpswe 0(%r15) # encoding: [0xb2,0xb2,0xf0,0x00]
+#CHECK: lpswe 4095 # encoding: [0xb2,0xb2,0x0f,0xff]
+#CHECK: lpswe 4095(%r1) # encoding: [0xb2,0xb2,0x1f,0xff]
+#CHECK: lpswe 4095(%r15) # encoding: [0xb2,0xb2,0xff,0xff]
+
+ lpswe 0
+ lpswe 0(%r1)
+ lpswe 0(%r15)
+ lpswe 4095
+ lpswe 4095(%r1)
+ lpswe 4095(%r15)
+
+#CHECK: lptea %r0, %r0, %r0, 0 # encoding: [0xb9,0xaa,0x00,0x00]
+#CHECK: lptea %r0, %r0, %r0, 15 # encoding: [0xb9,0xaa,0x0f,0x00]
+#CHECK: lptea %r0, %r0, %r15, 0 # encoding: [0xb9,0xaa,0x00,0x0f]
+#CHECK: lptea %r0, %r15, %r0, 0 # encoding: [0xb9,0xaa,0xf0,0x00]
+#CHECK: lptea %r4, %r5, %r6, 7 # encoding: [0xb9,0xaa,0x57,0x46]
+#CHECK: lptea %r15, %r0, %r0, 0 # encoding: [0xb9,0xaa,0x00,0xf0]
+
+ lptea %r0, %r0, %r0, 0
+ lptea %r0, %r0, %r0, 15
+ lptea %r0, %r0, %r15, 0
+ lptea %r0, %r15, %r0, 0
+ lptea %r4, %r5, %r6, 7
+ lptea %r15, %r0, %r0, 0
+
#CHECK: lpxbr %f0, %f8 # encoding: [0xb3,0x40,0x00,0x08]
#CHECK: lpxbr %f0, %f13 # encoding: [0xb3,0x40,0x00,0x0d]
#CHECK: lpxbr %f13, %f0 # encoding: [0xb3,0x40,0x00,0xd0]
@@ -9413,6 +9809,66 @@
lr %r15,%r0
lr %r15,%r9
+#CHECK: lra %r0, 0 # encoding: [0xb1,0x00,0x00,0x00]
+#CHECK: lra %r0, 4095 # encoding: [0xb1,0x00,0x0f,0xff]
+#CHECK: lra %r0, 0(%r1) # encoding: [0xb1,0x00,0x10,0x00]
+#CHECK: lra %r0, 0(%r15) # encoding: [0xb1,0x00,0xf0,0x00]
+#CHECK: lra %r0, 4095(%r1,%r15) # encoding: [0xb1,0x01,0xff,0xff]
+#CHECK: lra %r0, 4095(%r15,%r1) # encoding: [0xb1,0x0f,0x1f,0xff]
+#CHECK: lra %r15, 0 # encoding: [0xb1,0xf0,0x00,0x00]
+
+ lra %r0, 0
+ lra %r0, 4095
+ lra %r0, 0(%r1)
+ lra %r0, 0(%r15)
+ lra %r0, 4095(%r1,%r15)
+ lra %r0, 4095(%r15,%r1)
+ lra %r15, 0
+
+#CHECK: lrag %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x03]
+#CHECK: lrag %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x03]
+#CHECK: lrag %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x03]
+#CHECK: lrag %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x03]
+#CHECK: lrag %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x03]
+#CHECK: lrag %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x03]
+#CHECK: lrag %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x03]
+#CHECK: lrag %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x03]
+#CHECK: lrag %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x03]
+#CHECK: lrag %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x03]
+
+ lrag %r0, -524288
+ lrag %r0, -1
+ lrag %r0, 0
+ lrag %r0, 1
+ lrag %r0, 524287
+ lrag %r0, 0(%r1)
+ lrag %r0, 0(%r15)
+ lrag %r0, 524287(%r1,%r15)
+ lrag %r0, 524287(%r15,%r1)
+ lrag %r15, 0
+
+#CHECK: lray %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x13]
+#CHECK: lray %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x13]
+#CHECK: lray %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x13]
+#CHECK: lray %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x13]
+#CHECK: lray %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x13]
+#CHECK: lray %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x13]
+#CHECK: lray %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x13]
+#CHECK: lray %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x13]
+#CHECK: lray %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x13]
+#CHECK: lray %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x13]
+
+ lray %r0, -524288
+ lray %r0, -1
+ lray %r0, 0
+ lray %r0, 1
+ lray %r0, 524287
+ lray %r0, 0(%r1)
+ lray %r0, 0(%r15)
+ lray %r0, 524287(%r1,%r15)
+ lray %r0, 524287(%r15,%r1)
+ lray %r15, 0
+
#CHECK: lrdr %f0, %f0 # encoding: [0x25,0x00]
#CHECK: lrdr %f0, %f13 # encoding: [0x25,0x0d]
#CHECK: lrdr %f7, %f8 # encoding: [0x25,0x78]
@@ -9564,6 +10020,20 @@
lrvr %r7,%r8
lrvr %r15,%r15
+#CHECK: lsctl 0 # encoding: [0xb2,0x87,0x00,0x00]
+#CHECK: lsctl 0(%r1) # encoding: [0xb2,0x87,0x10,0x00]
+#CHECK: lsctl 0(%r15) # encoding: [0xb2,0x87,0xf0,0x00]
+#CHECK: lsctl 4095 # encoding: [0xb2,0x87,0x0f,0xff]
+#CHECK: lsctl 4095(%r1) # encoding: [0xb2,0x87,0x1f,0xff]
+#CHECK: lsctl 4095(%r15) # encoding: [0xb2,0x87,0xff,0xff]
+
+ lsctl 0
+ lsctl 0(%r1)
+ lsctl 0(%r15)
+ lsctl 4095
+ lsctl 4095(%r1)
+ lsctl 4095(%r15)
+
#CHECK: lt %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x12]
#CHECK: lt %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x12]
#CHECK: lt %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x12]
@@ -9740,6 +10210,26 @@
ltxtr %f13,%f0
ltxtr %f13,%f9
+#CHECK: lura %r0, %r0 # encoding: [0xb2,0x4b,0x00,0x00]
+#CHECK: lura %r0, %r15 # encoding: [0xb2,0x4b,0x00,0x0f]
+#CHECK: lura %r15, %r0 # encoding: [0xb2,0x4b,0x00,0xf0]
+#CHECK: lura %r7, %r8 # encoding: [0xb2,0x4b,0x00,0x78]
+
+ lura %r0,%r0
+ lura %r0,%r15
+ lura %r15,%r0
+ lura %r7,%r8
+
+#CHECK: lurag %r0, %r0 # encoding: [0xb9,0x05,0x00,0x00]
+#CHECK: lurag %r0, %r15 # encoding: [0xb9,0x05,0x00,0x0f]
+#CHECK: lurag %r15, %r0 # encoding: [0xb9,0x05,0x00,0xf0]
+#CHECK: lurag %r7, %r8 # encoding: [0xb9,0x05,0x00,0x78]
+
+ lurag %r0,%r0
+ lurag %r0,%r15
+ lurag %r15,%r0
+ lurag %r7,%r8
+
#CHECK: lxd %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x25]
#CHECK: lxd %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x25]
#CHECK: lxd %f0, 0(%r15) # encoding: [0xed,0x00,0xf0,0x00,0x00,0x25]
@@ -10578,6 +11068,20 @@
ms %r0, 4095(%r15,%r1)
ms %r15, 0
+#CHECK: msch 0 # encoding: [0xb2,0x32,0x00,0x00]
+#CHECK: msch 0(%r1) # encoding: [0xb2,0x32,0x10,0x00]
+#CHECK: msch 0(%r15) # encoding: [0xb2,0x32,0xf0,0x00]
+#CHECK: msch 4095 # encoding: [0xb2,0x32,0x0f,0xff]
+#CHECK: msch 4095(%r1) # encoding: [0xb2,0x32,0x1f,0xff]
+#CHECK: msch 4095(%r15) # encoding: [0xb2,0x32,0xff,0xff]
+
+ msch 0
+ msch 0(%r1)
+ msch 0(%r15)
+ msch 4095
+ msch 4095(%r1)
+ msch 4095(%r15)
+
#CHECK: msd %f0, %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x3f]
#CHECK: msd %f0, %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x3f]
#CHECK: msd %f0, %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x3f]
@@ -10816,6 +11320,14 @@
msr %r15,%r0
msr %r7,%r8
+#CHECK: msta %r0 # encoding: [0xb2,0x47,0x00,0x00]
+#CHECK: msta %r2 # encoding: [0xb2,0x47,0x00,0x20]
+#CHECK: msta %r14 # encoding: [0xb2,0x47,0x00,0xe0]
+
+ msta %r0
+ msta %r2
+ msta %r14
+
#CHECK: msy %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x51]
#CHECK: msy %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x51]
#CHECK: msy %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x51]
@@ -10864,6 +11376,28 @@
mvc 0(256,%r1), 0
mvc 0(256,%r15), 0
+#CHECK: mvcdk 0, 0 # encoding: [0xe5,0x0f,0x00,0x00,0x00,0x00]
+#CHECK: mvcdk 0(%r1), 0(%r2) # encoding: [0xe5,0x0f,0x10,0x00,0x20,0x00]
+#CHECK: mvcdk 160(%r1), 320(%r15) # encoding: [0xe5,0x0f,0x10,0xa0,0xf1,0x40]
+#CHECK: mvcdk 0(%r1), 4095 # encoding: [0xe5,0x0f,0x10,0x00,0x0f,0xff]
+#CHECK: mvcdk 0(%r1), 4095(%r2) # encoding: [0xe5,0x0f,0x10,0x00,0x2f,0xff]
+#CHECK: mvcdk 0(%r1), 4095(%r15) # encoding: [0xe5,0x0f,0x10,0x00,0xff,0xff]
+#CHECK: mvcdk 0(%r1), 0 # encoding: [0xe5,0x0f,0x10,0x00,0x00,0x00]
+#CHECK: mvcdk 0(%r15), 0 # encoding: [0xe5,0x0f,0xf0,0x00,0x00,0x00]
+#CHECK: mvcdk 4095(%r1), 0 # encoding: [0xe5,0x0f,0x1f,0xff,0x00,0x00]
+#CHECK: mvcdk 4095(%r15), 0 # encoding: [0xe5,0x0f,0xff,0xff,0x00,0x00]
+
+ mvcdk 0, 0
+ mvcdk 0(%r1), 0(%r2)
+ mvcdk 160(%r1), 320(%r15)
+ mvcdk 0(%r1), 4095
+ mvcdk 0(%r1), 4095(%r2)
+ mvcdk 0(%r1), 4095(%r15)
+ mvcdk 0(%r1), 0
+ mvcdk 0(%r15), 0
+ mvcdk 4095(%r1), 0
+ mvcdk 4095(%r15), 0
+
#CHECK: mvcin 0(1), 0 # encoding: [0xe8,0x00,0x00,0x00,0x00,0x00]
#CHECK: mvcin 0(1), 0(%r1) # encoding: [0xe8,0x00,0x00,0x00,0x10,0x00]
#CHECK: mvcin 0(1), 0(%r15) # encoding: [0xe8,0x00,0x00,0x00,0xf0,0x00]
@@ -10966,6 +11500,98 @@
mvclu %r0, %r0, 524287(%r1)
mvclu %r14, %r0, 0
+#CHECK: mvcos 0, 0, %r0 # encoding: [0xc8,0x00,0x00,0x00,0x00,0x00]
+#CHECK: mvcos 0(%r1), 0(%r15), %r2 # encoding: [0xc8,0x20,0x10,0x00,0xf0,0x00]
+#CHECK: mvcos 1(%r1), 0(%r15), %r2 # encoding: [0xc8,0x20,0x10,0x01,0xf0,0x00]
+#CHECK: mvcos 4095(%r1), 0(%r15), %r2 # encoding: [0xc8,0x20,0x1f,0xff,0xf0,0x00]
+#CHECK: mvcos 0(%r1), 1(%r15), %r2 # encoding: [0xc8,0x20,0x10,0x00,0xf0,0x01]
+#CHECK: mvcos 0(%r1), 4095(%r15), %r2 # encoding: [0xc8,0x20,0x10,0x00,0xff,0xff]
+
+ mvcos 0, 0, %r0
+ mvcos 0(%r1), 0(%r15), %r2
+ mvcos 1(%r1), 0(%r15), %r2
+ mvcos 4095(%r1), 0(%r15), %r2
+ mvcos 0(%r1), 1(%r15), %r2
+ mvcos 0(%r1), 4095(%r15), %r2
+
+#CHECK: mvcp 0(%r0), 0, %r3 # encoding: [0xda,0x03,0x00,0x00,0x00,0x00]
+#CHECK: mvcp 0(%r1), 0, %r3 # encoding: [0xda,0x13,0x00,0x00,0x00,0x00]
+#CHECK: mvcp 0(%r1), 0(%r1), %r3 # encoding: [0xda,0x13,0x00,0x00,0x10,0x00]
+#CHECK: mvcp 0(%r1), 0(%r15), %r3 # encoding: [0xda,0x13,0x00,0x00,0xf0,0x00]
+#CHECK: mvcp 0(%r1), 4095, %r3 # encoding: [0xda,0x13,0x00,0x00,0x0f,0xff]
+#CHECK: mvcp 0(%r1), 4095(%r1), %r3 # encoding: [0xda,0x13,0x00,0x00,0x1f,0xff]
+#CHECK: mvcp 0(%r1), 4095(%r15), %r3 # encoding: [0xda,0x13,0x00,0x00,0xff,0xff]
+#CHECK: mvcp 0(%r2,%r1), 0, %r3 # encoding: [0xda,0x23,0x10,0x00,0x00,0x00]
+#CHECK: mvcp 0(%r2,%r15), 0, %r3 # encoding: [0xda,0x23,0xf0,0x00,0x00,0x00]
+#CHECK: mvcp 4095(%r2,%r1), 0, %r3 # encoding: [0xda,0x23,0x1f,0xff,0x00,0x00]
+#CHECK: mvcp 4095(%r2,%r15), 0, %r3 # encoding: [0xda,0x23,0xff,0xff,0x00,0x00]
+#CHECK: mvcp 0(%r2,%r1), 0, %r3 # encoding: [0xda,0x23,0x10,0x00,0x00,0x00]
+#CHECK: mvcp 0(%r2,%r15), 0, %r3 # encoding: [0xda,0x23,0xf0,0x00,0x00,0x00]
+
+ mvcp 0(%r0), 0, %r3
+ mvcp 0(%r1), 0, %r3
+ mvcp 0(%r1), 0(%r1), %r3
+ mvcp 0(%r1), 0(%r15), %r3
+ mvcp 0(%r1), 4095, %r3
+ mvcp 0(%r1), 4095(%r1), %r3
+ mvcp 0(%r1), 4095(%r15), %r3
+ mvcp 0(%r2,%r1), 0, %r3
+ mvcp 0(%r2,%r15), 0, %r3
+ mvcp 4095(%r2,%r1), 0, %r3
+ mvcp 4095(%r2,%r15), 0, %r3
+ mvcp 0(%r2,%r1), 0, %r3
+ mvcp 0(%r2,%r15), 0, %r3
+
+#CHECK: mvcs 0(%r0), 0, %r3 # encoding: [0xdb,0x03,0x00,0x00,0x00,0x00]
+#CHECK: mvcs 0(%r1), 0, %r3 # encoding: [0xdb,0x13,0x00,0x00,0x00,0x00]
+#CHECK: mvcs 0(%r1), 0(%r1), %r3 # encoding: [0xdb,0x13,0x00,0x00,0x10,0x00]
+#CHECK: mvcs 0(%r1), 0(%r15), %r3 # encoding: [0xdb,0x13,0x00,0x00,0xf0,0x00]
+#CHECK: mvcs 0(%r1), 4095, %r3 # encoding: [0xdb,0x13,0x00,0x00,0x0f,0xff]
+#CHECK: mvcs 0(%r1), 4095(%r1), %r3 # encoding: [0xdb,0x13,0x00,0x00,0x1f,0xff]
+#CHECK: mvcs 0(%r1), 4095(%r15), %r3 # encoding: [0xdb,0x13,0x00,0x00,0xff,0xff]
+#CHECK: mvcs 0(%r2,%r1), 0, %r3 # encoding: [0xdb,0x23,0x10,0x00,0x00,0x00]
+#CHECK: mvcs 0(%r2,%r15), 0, %r3 # encoding: [0xdb,0x23,0xf0,0x00,0x00,0x00]
+#CHECK: mvcs 4095(%r2,%r1), 0, %r3 # encoding: [0xdb,0x23,0x1f,0xff,0x00,0x00]
+#CHECK: mvcs 4095(%r2,%r15), 0, %r3 # encoding: [0xdb,0x23,0xff,0xff,0x00,0x00]
+#CHECK: mvcs 0(%r2,%r1), 0, %r3 # encoding: [0xdb,0x23,0x10,0x00,0x00,0x00]
+#CHECK: mvcs 0(%r2,%r15), 0, %r3 # encoding: [0xdb,0x23,0xf0,0x00,0x00,0x00]
+
+ mvcs 0(%r0), 0, %r3
+ mvcs 0(%r1), 0, %r3
+ mvcs 0(%r1), 0(%r1), %r3
+ mvcs 0(%r1), 0(%r15), %r3
+ mvcs 0(%r1), 4095, %r3
+ mvcs 0(%r1), 4095(%r1), %r3
+ mvcs 0(%r1), 4095(%r15), %r3
+ mvcs 0(%r2,%r1), 0, %r3
+ mvcs 0(%r2,%r15), 0, %r3
+ mvcs 4095(%r2,%r1), 0, %r3
+ mvcs 4095(%r2,%r15), 0, %r3
+ mvcs 0(%r2,%r1), 0, %r3
+ mvcs 0(%r2,%r15), 0, %r3
+
+#CHECK: mvcsk 0, 0 # encoding: [0xe5,0x0e,0x00,0x00,0x00,0x00]
+#CHECK: mvcsk 0(%r1), 0(%r2) # encoding: [0xe5,0x0e,0x10,0x00,0x20,0x00]
+#CHECK: mvcsk 160(%r1), 320(%r15) # encoding: [0xe5,0x0e,0x10,0xa0,0xf1,0x40]
+#CHECK: mvcsk 0(%r1), 4095 # encoding: [0xe5,0x0e,0x10,0x00,0x0f,0xff]
+#CHECK: mvcsk 0(%r1), 4095(%r2) # encoding: [0xe5,0x0e,0x10,0x00,0x2f,0xff]
+#CHECK: mvcsk 0(%r1), 4095(%r15) # encoding: [0xe5,0x0e,0x10,0x00,0xff,0xff]
+#CHECK: mvcsk 0(%r1), 0 # encoding: [0xe5,0x0e,0x10,0x00,0x00,0x00]
+#CHECK: mvcsk 0(%r15), 0 # encoding: [0xe5,0x0e,0xf0,0x00,0x00,0x00]
+#CHECK: mvcsk 4095(%r1), 0 # encoding: [0xe5,0x0e,0x1f,0xff,0x00,0x00]
+#CHECK: mvcsk 4095(%r15), 0 # encoding: [0xe5,0x0e,0xff,0xff,0x00,0x00]
+
+ mvcsk 0, 0
+ mvcsk 0(%r1), 0(%r2)
+ mvcsk 160(%r1), 320(%r15)
+ mvcsk 0(%r1), 4095
+ mvcsk 0(%r1), 4095(%r2)
+ mvcsk 0(%r1), 4095(%r15)
+ mvcsk 0(%r1), 0
+ mvcsk 0(%r15), 0
+ mvcsk 4095(%r1), 0
+ mvcsk 4095(%r15), 0
+
#CHECK: mvghi 0, 0 # encoding: [0xe5,0x48,0x00,0x00,0x00,0x00]
#CHECK: mvghi 4095, 0 # encoding: [0xe5,0x48,0x0f,0xff,0x00,0x00]
#CHECK: mvghi 0, -32768 # encoding: [0xe5,0x48,0x00,0x00,0x80,0x00]
@@ -11132,6 +11758,16 @@
mvo 0(1), 0(16,%r1)
mvo 0(1), 0(16,%r15)
+#CHECK: mvpg %r0, %r0 # encoding: [0xb2,0x54,0x00,0x00]
+#CHECK: mvpg %r0, %r15 # encoding: [0xb2,0x54,0x00,0x0f]
+#CHECK: mvpg %r15, %r0 # encoding: [0xb2,0x54,0x00,0xf0]
+#CHECK: mvpg %r7, %r8 # encoding: [0xb2,0x54,0x00,0x78]
+
+ mvpg %r0,%r0
+ mvpg %r0,%r15
+ mvpg %r15,%r0
+ mvpg %r7,%r8
+
#CHECK: mvst %r0, %r0 # encoding: [0xb2,0x55,0x00,0x00]
#CHECK: mvst %r0, %r15 # encoding: [0xb2,0x55,0x00,0x0f]
#CHECK: mvst %r15, %r0 # encoding: [0xb2,0x55,0x00,0xf0]
@@ -11790,6 +12426,24 @@
pack 0(1), 0(16,%r1)
pack 0(1), 0(16,%r15)
+#CHECK: palb # encoding: [0xb2,0x48,0x00,0x00]
+
+ palb
+
+#CHECK: pc 0 # encoding: [0xb2,0x18,0x00,0x00]
+#CHECK: pc 0(%r1) # encoding: [0xb2,0x18,0x10,0x00]
+#CHECK: pc 0(%r15) # encoding: [0xb2,0x18,0xf0,0x00]
+#CHECK: pc 4095 # encoding: [0xb2,0x18,0x0f,0xff]
+#CHECK: pc 4095(%r1) # encoding: [0xb2,0x18,0x1f,0xff]
+#CHECK: pc 4095(%r15) # encoding: [0xb2,0x18,0xff,0xff]
+
+ pc 0
+ pc 0(%r1)
+ pc 0(%r15)
+ pc 4095
+ pc 4095(%r1)
+ pc 4095(%r15)
+
#CHECK: pfd 0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x36]
#CHECK: pfd 0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x36]
#CHECK: pfd 0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x36]
@@ -11849,9 +12503,45 @@
pfdrl 7, frob@PLT
pfdrl 8, frob@PLT
+#CHECK: pfmf %r0, %r0 # encoding: [0xb9,0xaf,0x00,0x00]
+#CHECK: pfmf %r0, %r15 # encoding: [0xb9,0xaf,0x00,0x0f]
+#CHECK: pfmf %r15, %r0 # encoding: [0xb9,0xaf,0x00,0xf0]
+#CHECK: pfmf %r7, %r8 # encoding: [0xb9,0xaf,0x00,0x78]
+#CHECK: pfmf %r15, %r15 # encoding: [0xb9,0xaf,0x00,0xff]
+
+ pfmf %r0,%r0
+ pfmf %r0,%r15
+ pfmf %r15,%r0
+ pfmf %r7,%r8
+ pfmf %r15,%r15
+
#CHECK: pfpo # encoding: [0x01,0x0a]
pfpo
+#CHECK: pgin %r0, %r0 # encoding: [0xb2,0x2e,0x00,0x00]
+#CHECK: pgin %r0, %r15 # encoding: [0xb2,0x2e,0x00,0x0f]
+#CHECK: pgin %r15, %r0 # encoding: [0xb2,0x2e,0x00,0xf0]
+#CHECK: pgin %r7, %r8 # encoding: [0xb2,0x2e,0x00,0x78]
+#CHECK: pgin %r15, %r15 # encoding: [0xb2,0x2e,0x00,0xff]
+
+ pgin %r0,%r0
+ pgin %r0,%r15
+ pgin %r15,%r0
+ pgin %r7,%r8
+ pgin %r15,%r15
+
+#CHECK: pgout %r0, %r0 # encoding: [0xb2,0x2f,0x00,0x00]
+#CHECK: pgout %r0, %r15 # encoding: [0xb2,0x2f,0x00,0x0f]
+#CHECK: pgout %r15, %r0 # encoding: [0xb2,0x2f,0x00,0xf0]
+#CHECK: pgout %r7, %r8 # encoding: [0xb2,0x2f,0x00,0x78]
+#CHECK: pgout %r15, %r15 # encoding: [0xb2,0x2f,0x00,0xff]
+
+ pgout %r0,%r0
+ pgout %r0,%r15
+ pgout %r15,%r0
+ pgout %r7,%r8
+ pgout %r15,%r15
+
#CHECK: pka 0, 0(1) # encoding: [0xe9,0x00,0x00,0x00,0x00,0x00]
#CHECK: pka 0, 0(1,%r1) # encoding: [0xe9,0x00,0x00,0x00,0x10,0x00]
#CHECK: pka 0, 0(1,%r15) # encoding: [0xe9,0x00,0x00,0x00,0xf0,0x00]
@@ -11919,8 +12609,45 @@
plo %r2, 0(%r1), %r4, 4095(%r15)
#CHECK: pr # encoding: [0x01,0x01]
+
pr
+#CHECK: pt %r0, %r0 # encoding: [0xb2,0x28,0x00,0x00]
+#CHECK: pt %r0, %r15 # encoding: [0xb2,0x28,0x00,0x0f]
+#CHECK: pt %r15, %r0 # encoding: [0xb2,0x28,0x00,0xf0]
+#CHECK: pt %r7, %r8 # encoding: [0xb2,0x28,0x00,0x78]
+
+ pt %r0,%r0
+ pt %r0,%r15
+ pt %r15,%r0
+ pt %r7,%r8
+
+#CHECK: ptf %r0 # encoding: [0xb9,0xa2,0x00,0x00]
+#CHECK: ptf %r1 # encoding: [0xb9,0xa2,0x00,0x10]
+#CHECK: ptf %r15 # encoding: [0xb9,0xa2,0x00,0xf0]
+
+ ptf %r0
+ ptf %r1
+ ptf %r15
+
+#CHECK: ptff # encoding: [0x01,0x04]
+
+ ptff
+
+#CHECK: pti %r0, %r0 # encoding: [0xb9,0x9e,0x00,0x00]
+#CHECK: pti %r0, %r15 # encoding: [0xb9,0x9e,0x00,0x0f]
+#CHECK: pti %r15, %r0 # encoding: [0xb9,0x9e,0x00,0xf0]
+#CHECK: pti %r7, %r8 # encoding: [0xb9,0x9e,0x00,0x78]
+
+ pti %r0,%r0
+ pti %r0,%r15
+ pti %r15,%r0
+ pti %r7,%r8
+
+#CHECK: ptlb # encoding: [0xb2,0x0d,0x00,0x00]
+
+ ptlb
+
#CHECK: qadtr %f0, %f0, %f0, 0 # encoding: [0xb3,0xf5,0x00,0x00]
#CHECK: qadtr %f0, %f0, %f0, 15 # encoding: [0xb3,0xf5,0x0f,0x00]
#CHECK: qadtr %f0, %f0, %f15, 0 # encoding: [0xb3,0xf5,0x00,0x0f]
@@ -11949,6 +12676,38 @@
qaxtr %f8, %f8, %f8, 8
qaxtr %f13, %f0, %f0, 0
+#CHECK: qctri 0 # encoding: [0xb2,0x8e,0x00,0x00]
+#CHECK: qctri 0(%r1) # encoding: [0xb2,0x8e,0x10,0x00]
+#CHECK: qctri 0(%r15) # encoding: [0xb2,0x8e,0xf0,0x00]
+#CHECK: qctri 4095 # encoding: [0xb2,0x8e,0x0f,0xff]
+#CHECK: qctri 4095(%r1) # encoding: [0xb2,0x8e,0x1f,0xff]
+#CHECK: qctri 4095(%r15) # encoding: [0xb2,0x8e,0xff,0xff]
+
+ qctri 0
+ qctri 0(%r1)
+ qctri 0(%r15)
+ qctri 4095
+ qctri 4095(%r1)
+ qctri 4095(%r15)
+
+#CHECK: qsi 0 # encoding: [0xb2,0x86,0x00,0x00]
+#CHECK: qsi 0(%r1) # encoding: [0xb2,0x86,0x10,0x00]
+#CHECK: qsi 0(%r15) # encoding: [0xb2,0x86,0xf0,0x00]
+#CHECK: qsi 4095 # encoding: [0xb2,0x86,0x0f,0xff]
+#CHECK: qsi 4095(%r1) # encoding: [0xb2,0x86,0x1f,0xff]
+#CHECK: qsi 4095(%r15) # encoding: [0xb2,0x86,0xff,0xff]
+
+ qsi 0
+ qsi 0(%r1)
+ qsi 0(%r15)
+ qsi 4095
+ qsi 4095(%r1)
+ qsi 4095(%r15)
+
+#CHECK: rchp # encoding: [0xb2,0x3b,0x00,0x00]
+
+ rchp
+
#CHECK: risbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x55]
#CHECK: risbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x55]
#CHECK: risbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x55]
@@ -12049,6 +12808,32 @@
rosbg %r15,%r0,0,0,0
rosbg %r4,%r5,6,7,8
+#CHECK: rp 0 # encoding: [0xb2,0x77,0x00,0x00]
+#CHECK: rp 0(%r1) # encoding: [0xb2,0x77,0x10,0x00]
+#CHECK: rp 0(%r15) # encoding: [0xb2,0x77,0xf0,0x00]
+#CHECK: rp 4095 # encoding: [0xb2,0x77,0x0f,0xff]
+#CHECK: rp 4095(%r1) # encoding: [0xb2,0x77,0x1f,0xff]
+#CHECK: rp 4095(%r15) # encoding: [0xb2,0x77,0xff,0xff]
+
+ rp 0
+ rp 0(%r1)
+ rp 0(%r15)
+ rp 4095
+ rp 4095(%r1)
+ rp 4095(%r15)
+
+#CHECK: rrbe %r0, %r0 # encoding: [0xb2,0x2a,0x00,0x00]
+#CHECK: rrbe %r0, %r15 # encoding: [0xb2,0x2a,0x00,0x0f]
+#CHECK: rrbe %r15, %r0 # encoding: [0xb2,0x2a,0x00,0xf0]
+#CHECK: rrbe %r7, %r8 # encoding: [0xb2,0x2a,0x00,0x78]
+#CHECK: rrbe %r15, %r15 # encoding: [0xb2,0x2a,0x00,0xff]
+
+ rrbe %r0,%r0
+ rrbe %r0,%r15
+ rrbe %r15,%r0
+ rrbe %r7,%r8
+ rrbe %r15,%r15
+
#CHECK: rrdtr %f0, %f0, %f0, 0 # encoding: [0xb3,0xf7,0x00,0x00]
#CHECK: rrdtr %f0, %f0, %f0, 15 # encoding: [0xb3,0xf7,0x0f,0x00]
#CHECK: rrdtr %f0, %f0, %f15, 0 # encoding: [0xb3,0xf7,0x00,0x0f]
@@ -12077,6 +12862,10 @@
rrxtr %f8, %f8, %f8, 8
rrxtr %f13, %f0, %f0, 0
+#CHECK: rsch # encoding: [0xb2,0x38,0x00,0x00]
+
+ rsch
+
#CHECK: rxsbg %r0, %r0, 0, 0, 0 # encoding: [0xec,0x00,0x00,0x00,0x00,0x57]
#CHECK: rxsbg %r0, %r0, 0, 0, 63 # encoding: [0xec,0x00,0x00,0x00,0x3f,0x57]
#CHECK: rxsbg %r0, %r0, 0, 255, 0 # encoding: [0xec,0x00,0x00,0xff,0x00,0x57]
@@ -12109,6 +12898,38 @@
s %r0, 4095(%r15,%r1)
s %r15, 0
+#CHECK: sac 0 # encoding: [0xb2,0x19,0x00,0x00]
+#CHECK: sac 0(%r1) # encoding: [0xb2,0x19,0x10,0x00]
+#CHECK: sac 0(%r15) # encoding: [0xb2,0x19,0xf0,0x00]
+#CHECK: sac 4095 # encoding: [0xb2,0x19,0x0f,0xff]
+#CHECK: sac 4095(%r1) # encoding: [0xb2,0x19,0x1f,0xff]
+#CHECK: sac 4095(%r15) # encoding: [0xb2,0x19,0xff,0xff]
+
+ sac 0
+ sac 0(%r1)
+ sac 0(%r15)
+ sac 4095
+ sac 4095(%r1)
+ sac 4095(%r15)
+
+#CHECK: sacf 0 # encoding: [0xb2,0x79,0x00,0x00]
+#CHECK: sacf 0(%r1) # encoding: [0xb2,0x79,0x10,0x00]
+#CHECK: sacf 0(%r15) # encoding: [0xb2,0x79,0xf0,0x00]
+#CHECK: sacf 4095 # encoding: [0xb2,0x79,0x0f,0xff]
+#CHECK: sacf 4095(%r1) # encoding: [0xb2,0x79,0x1f,0xff]
+#CHECK: sacf 4095(%r15) # encoding: [0xb2,0x79,0xff,0xff]
+
+ sacf 0
+ sacf 0(%r1)
+ sacf 0(%r15)
+ sacf 4095
+ sacf 4095(%r1)
+ sacf 4095(%r15)
+
+#CHECK: sal # encoding: [0xb2,0x37,0x00,0x00]
+
+ sal
+
#CHECK: sam24 # encoding: [0x01,0x0c]
#CHECK: sam31 # encoding: [0x01,0x0d]
#CHECK: sam64 # encoding: [0x01,0x0e]
@@ -12129,6 +12950,52 @@
sar %a7, %r8
sar %a15, %r15
+#CHECK: scctr %r0, %r0 # encoding: [0xb2,0xe0,0x00,0x00]
+#CHECK: scctr %r0, %r15 # encoding: [0xb2,0xe0,0x00,0x0f]
+#CHECK: scctr %r15, %r0 # encoding: [0xb2,0xe0,0x00,0xf0]
+#CHECK: scctr %r7, %r8 # encoding: [0xb2,0xe0,0x00,0x78]
+
+ scctr %r0,%r0
+ scctr %r0,%r15
+ scctr %r15,%r0
+ scctr %r7,%r8
+
+#CHECK: schm # encoding: [0xb2,0x3c,0x00,0x00]
+
+ schm
+
+#CHECK: sck 0 # encoding: [0xb2,0x04,0x00,0x00]
+#CHECK: sck 0(%r1) # encoding: [0xb2,0x04,0x10,0x00]
+#CHECK: sck 0(%r15) # encoding: [0xb2,0x04,0xf0,0x00]
+#CHECK: sck 4095 # encoding: [0xb2,0x04,0x0f,0xff]
+#CHECK: sck 4095(%r1) # encoding: [0xb2,0x04,0x1f,0xff]
+#CHECK: sck 4095(%r15) # encoding: [0xb2,0x04,0xff,0xff]
+
+ sck 0
+ sck 0(%r1)
+ sck 0(%r15)
+ sck 4095
+ sck 4095(%r1)
+ sck 4095(%r15)
+
+#CHECK: sckc 0 # encoding: [0xb2,0x06,0x00,0x00]
+#CHECK: sckc 0(%r1) # encoding: [0xb2,0x06,0x10,0x00]
+#CHECK: sckc 0(%r15) # encoding: [0xb2,0x06,0xf0,0x00]
+#CHECK: sckc 4095 # encoding: [0xb2,0x06,0x0f,0xff]
+#CHECK: sckc 4095(%r1) # encoding: [0xb2,0x06,0x1f,0xff]
+#CHECK: sckc 4095(%r15) # encoding: [0xb2,0x06,0xff,0xff]
+
+ sckc 0
+ sckc 0(%r1)
+ sckc 0(%r15)
+ sckc 4095
+ sckc 4095(%r1)
+ sckc 4095(%r15)
+
+#CHECK: sckpf # encoding: [0x01,0x07]
+
+ sckpf
+
#CHECK: sd %f0, 0 # encoding: [0x6b,0x00,0x00,0x00]
#CHECK: sd %f0, 4095 # encoding: [0x6b,0x00,0x0f,0xff]
#CHECK: sd %f0, 0(%r1) # encoding: [0x6b,0x00,0x10,0x00]
@@ -12363,6 +13230,56 @@
shy %r0, 524287(%r15,%r1)
shy %r15, 0
+#CHECK: sie 0 # encoding: [0xb2,0x14,0x00,0x00]
+#CHECK: sie 0(%r1) # encoding: [0xb2,0x14,0x10,0x00]
+#CHECK: sie 0(%r15) # encoding: [0xb2,0x14,0xf0,0x00]
+#CHECK: sie 4095 # encoding: [0xb2,0x14,0x0f,0xff]
+#CHECK: sie 4095(%r1) # encoding: [0xb2,0x14,0x1f,0xff]
+#CHECK: sie 4095(%r15) # encoding: [0xb2,0x14,0xff,0xff]
+
+ sie 0
+ sie 0(%r1)
+ sie 0(%r15)
+ sie 4095
+ sie 4095(%r1)
+ sie 4095(%r15)
+
+#CHECK: siga 0 # encoding: [0xb2,0x74,0x00,0x00]
+#CHECK: siga 0(%r1) # encoding: [0xb2,0x74,0x10,0x00]
+#CHECK: siga 0(%r15) # encoding: [0xb2,0x74,0xf0,0x00]
+#CHECK: siga 4095 # encoding: [0xb2,0x74,0x0f,0xff]
+#CHECK: siga 4095(%r1) # encoding: [0xb2,0x74,0x1f,0xff]
+#CHECK: siga 4095(%r15) # encoding: [0xb2,0x74,0xff,0xff]
+
+ siga 0
+ siga 0(%r1)
+ siga 0(%r15)
+ siga 4095
+ siga 4095(%r1)
+ siga 4095(%r15)
+
+#CHECK: sigp %r0, %r0, 0 # encoding: [0xae,0x00,0x00,0x00]
+#CHECK: sigp %r0, %r15, 0 # encoding: [0xae,0x0f,0x00,0x00]
+#CHECK: sigp %r14, %r15, 0 # encoding: [0xae,0xef,0x00,0x00]
+#CHECK: sigp %r15, %r15, 0 # encoding: [0xae,0xff,0x00,0x00]
+#CHECK: sigp %r0, %r0, 4095 # encoding: [0xae,0x00,0x0f,0xff]
+#CHECK: sigp %r0, %r0, 1 # encoding: [0xae,0x00,0x00,0x01]
+#CHECK: sigp %r0, %r0, 0(%r1) # encoding: [0xae,0x00,0x10,0x00]
+#CHECK: sigp %r0, %r0, 0(%r15) # encoding: [0xae,0x00,0xf0,0x00]
+#CHECK: sigp %r0, %r0, 4095(%r1) # encoding: [0xae,0x00,0x1f,0xff]
+#CHECK: sigp %r0, %r0, 4095(%r15) # encoding: [0xae,0x00,0xff,0xff]
+
+ sigp %r0,%r0,0
+ sigp %r0,%r15,0
+ sigp %r14,%r15,0
+ sigp %r15,%r15,0
+ sigp %r0,%r0,4095
+ sigp %r0,%r0,1
+ sigp %r0,%r0,0(%r1)
+ sigp %r0,%r0,0(%r15)
+ sigp %r0,%r0,4095(%r1)
+ sigp %r0,%r0,4095(%r15)
+
#CHECK: sl %r0, 0 # encoding: [0x5f,0x00,0x00,0x00]
#CHECK: sl %r0, 4095 # encoding: [0x5f,0x00,0x0f,0xff]
#CHECK: sl %r0, 0(%r1) # encoding: [0x5f,0x00,0x10,0x00]
@@ -12749,6 +13666,30 @@
sp 0(1), 0(16,%r1)
sp 0(1), 0(16,%r15)
+#CHECK: spctr %r0, %r0 # encoding: [0xb2,0xe1,0x00,0x00]
+#CHECK: spctr %r0, %r15 # encoding: [0xb2,0xe1,0x00,0x0f]
+#CHECK: spctr %r15, %r0 # encoding: [0xb2,0xe1,0x00,0xf0]
+#CHECK: spctr %r7, %r8 # encoding: [0xb2,0xe1,0x00,0x78]
+
+ spctr %r0,%r0
+ spctr %r0,%r15
+ spctr %r15,%r0
+ spctr %r7,%r8
+
+#CHECK: spka 0 # encoding: [0xb2,0x0a,0x00,0x00]
+#CHECK: spka 0(%r1) # encoding: [0xb2,0x0a,0x10,0x00]
+#CHECK: spka 0(%r15) # encoding: [0xb2,0x0a,0xf0,0x00]
+#CHECK: spka 4095 # encoding: [0xb2,0x0a,0x0f,0xff]
+#CHECK: spka 4095(%r1) # encoding: [0xb2,0x0a,0x1f,0xff]
+#CHECK: spka 4095(%r15) # encoding: [0xb2,0x0a,0xff,0xff]
+
+ spka 0
+ spka 0(%r1)
+ spka 0(%r15)
+ spka 4095
+ spka 4095(%r1)
+ spka 4095(%r15)
+
#CHECK: spm %r0 # encoding: [0x04,0x00]
#CHECK: spm %r1 # encoding: [0x04,0x10]
#CHECK: spm %r15 # encoding: [0x04,0xf0]
@@ -12757,6 +13698,34 @@
spm %r1
spm %r15
+#CHECK: spt 0 # encoding: [0xb2,0x08,0x00,0x00]
+#CHECK: spt 0(%r1) # encoding: [0xb2,0x08,0x10,0x00]
+#CHECK: spt 0(%r15) # encoding: [0xb2,0x08,0xf0,0x00]
+#CHECK: spt 4095 # encoding: [0xb2,0x08,0x0f,0xff]
+#CHECK: spt 4095(%r1) # encoding: [0xb2,0x08,0x1f,0xff]
+#CHECK: spt 4095(%r15) # encoding: [0xb2,0x08,0xff,0xff]
+
+ spt 0
+ spt 0(%r1)
+ spt 0(%r15)
+ spt 4095
+ spt 4095(%r1)
+ spt 4095(%r15)
+
+#CHECK: spx 0 # encoding: [0xb2,0x10,0x00,0x00]
+#CHECK: spx 0(%r1) # encoding: [0xb2,0x10,0x10,0x00]
+#CHECK: spx 0(%r15) # encoding: [0xb2,0x10,0xf0,0x00]
+#CHECK: spx 4095 # encoding: [0xb2,0x10,0x0f,0xff]
+#CHECK: spx 4095(%r1) # encoding: [0xb2,0x10,0x1f,0xff]
+#CHECK: spx 4095(%r15) # encoding: [0xb2,0x10,0xff,0xff]
+
+ spx 0
+ spx 0(%r1)
+ spx 0(%r15)
+ spx 4095
+ spx 4095(%r1)
+ spx 4095(%r15)
+
#CHECK: sqd %f0, 0 # encoding: [0xed,0x00,0x00,0x00,0x00,0x35]
#CHECK: sqd %f0, 4095 # encoding: [0xed,0x00,0x0f,0xff,0x00,0x35]
#CHECK: sqd %f0, 0(%r1) # encoding: [0xed,0x00,0x10,0x00,0x00,0x35]
@@ -13131,6 +14100,62 @@
srxt %f13, %f0, 0
srxt %f13, %f13, 0
+#CHECK: ssar %r0 # encoding: [0xb2,0x25,0x00,0x00]
+#CHECK: ssar %r1 # encoding: [0xb2,0x25,0x00,0x10]
+#CHECK: ssar %r15 # encoding: [0xb2,0x25,0x00,0xf0]
+
+ ssar %r0
+ ssar %r1
+ ssar %r15
+
+#CHECK: ssair %r0 # encoding: [0xb9,0x9f,0x00,0x00]
+#CHECK: ssair %r1 # encoding: [0xb9,0x9f,0x00,0x10]
+#CHECK: ssair %r15 # encoding: [0xb9,0x9f,0x00,0xf0]
+
+ ssair %r0
+ ssair %r1
+ ssair %r15
+
+#CHECK: ssch 0 # encoding: [0xb2,0x33,0x00,0x00]
+#CHECK: ssch 0(%r1) # encoding: [0xb2,0x33,0x10,0x00]
+#CHECK: ssch 0(%r15) # encoding: [0xb2,0x33,0xf0,0x00]
+#CHECK: ssch 4095 # encoding: [0xb2,0x33,0x0f,0xff]
+#CHECK: ssch 4095(%r1) # encoding: [0xb2,0x33,0x1f,0xff]
+#CHECK: ssch 4095(%r15) # encoding: [0xb2,0x33,0xff,0xff]
+
+ ssch 0
+ ssch 0(%r1)
+ ssch 0(%r15)
+ ssch 4095
+ ssch 4095(%r1)
+ ssch 4095(%r15)
+
+#CHECK: sske %r0, %r0 # encoding: [0xb2,0x2b,0x00,0x00]
+#CHECK: sske %r0, %r15 # encoding: [0xb2,0x2b,0x00,0x0f]
+#CHECK: sske %r15, %r0 # encoding: [0xb2,0x2b,0x00,0xf0]
+#CHECK: sske %r0, %r0, 15 # encoding: [0xb2,0x2b,0xf0,0x00]
+#CHECK: sske %r4, %r6, 7 # encoding: [0xb2,0x2b,0x70,0x46]
+
+ sske %r0, %r0
+ sske %r0, %r15
+ sske %r15, %r0
+ sske %r0, %r0, 15
+ sske %r4, %r6, 7
+
+#CHECK: ssm 0 # encoding: [0x80,0x00,0x00,0x00]
+#CHECK: ssm 0(%r1) # encoding: [0x80,0x00,0x10,0x00]
+#CHECK: ssm 0(%r15) # encoding: [0x80,0x00,0xf0,0x00]
+#CHECK: ssm 4095 # encoding: [0x80,0x00,0x0f,0xff]
+#CHECK: ssm 4095(%r1) # encoding: [0x80,0x00,0x1f,0xff]
+#CHECK: ssm 4095(%r15) # encoding: [0x80,0x00,0xff,0xff]
+
+ ssm 0
+ ssm 0(%r1)
+ ssm 0(%r15)
+ ssm 4095
+ ssm 4095(%r1)
+ ssm 4095(%r15)
+
#CHECK: st %r0, 0 # encoding: [0x50,0x00,0x00,0x00]
#CHECK: st %r0, 4095 # encoding: [0x50,0x00,0x0f,0xff]
#CHECK: st %r0, 0(%r1) # encoding: [0x50,0x00,0x10,0x00]
@@ -13197,6 +14222,20 @@
stamy %a0,%a0,524287(%r1)
stamy %a0,%a0,524287(%r15)
+#CHECK: stap 0 # encoding: [0xb2,0x12,0x00,0x00]
+#CHECK: stap 0(%r1) # encoding: [0xb2,0x12,0x10,0x00]
+#CHECK: stap 0(%r15) # encoding: [0xb2,0x12,0xf0,0x00]
+#CHECK: stap 4095 # encoding: [0xb2,0x12,0x0f,0xff]
+#CHECK: stap 4095(%r1) # encoding: [0xb2,0x12,0x1f,0xff]
+#CHECK: stap 4095(%r15) # encoding: [0xb2,0x12,0xff,0xff]
+
+ stap 0
+ stap 0(%r1)
+ stap 0(%r15)
+ stap 4095
+ stap 4095(%r1)
+ stap 4095(%r15)
+
#CHECK: stc %r0, 0 # encoding: [0x42,0x00,0x00,0x00]
#CHECK: stc %r0, 4095 # encoding: [0x42,0x00,0x0f,0xff]
#CHECK: stc %r0, 0(%r1) # encoding: [0x42,0x00,0x10,0x00]
@@ -13227,6 +14266,20 @@
stck 4095(%r1)
stck 4095(%r15)
+#CHECK: stckc 0 # encoding: [0xb2,0x07,0x00,0x00]
+#CHECK: stckc 0(%r1) # encoding: [0xb2,0x07,0x10,0x00]
+#CHECK: stckc 0(%r15) # encoding: [0xb2,0x07,0xf0,0x00]
+#CHECK: stckc 4095 # encoding: [0xb2,0x07,0x0f,0xff]
+#CHECK: stckc 4095(%r1) # encoding: [0xb2,0x07,0x1f,0xff]
+#CHECK: stckc 4095(%r15) # encoding: [0xb2,0x07,0xff,0xff]
+
+ stckc 0
+ stckc 0(%r1)
+ stckc 0(%r15)
+ stckc 4095
+ stckc 4095(%r1)
+ stckc 4095(%r15)
+
#CHECK: stcke 0 # encoding: [0xb2,0x78,0x00,0x00]
#CHECK: stcke 0(%r1) # encoding: [0xb2,0x78,0x10,0x00]
#CHECK: stcke 0(%r15) # encoding: [0xb2,0x78,0xf0,0x00]
@@ -13315,6 +14368,84 @@
stcmy %r0, 0, 524287(%r1)
stcmy %r15, 0, 0
+#CHECK: stcps 0 # encoding: [0xb2,0x3a,0x00,0x00]
+#CHECK: stcps 0(%r1) # encoding: [0xb2,0x3a,0x10,0x00]
+#CHECK: stcps 0(%r15) # encoding: [0xb2,0x3a,0xf0,0x00]
+#CHECK: stcps 4095 # encoding: [0xb2,0x3a,0x0f,0xff]
+#CHECK: stcps 4095(%r1) # encoding: [0xb2,0x3a,0x1f,0xff]
+#CHECK: stcps 4095(%r15) # encoding: [0xb2,0x3a,0xff,0xff]
+
+ stcps 0
+ stcps 0(%r1)
+ stcps 0(%r15)
+ stcps 4095
+ stcps 4095(%r1)
+ stcps 4095(%r15)
+
+#CHECK: stcrw 0 # encoding: [0xb2,0x39,0x00,0x00]
+#CHECK: stcrw 0(%r1) # encoding: [0xb2,0x39,0x10,0x00]
+#CHECK: stcrw 0(%r15) # encoding: [0xb2,0x39,0xf0,0x00]
+#CHECK: stcrw 4095 # encoding: [0xb2,0x39,0x0f,0xff]
+#CHECK: stcrw 4095(%r1) # encoding: [0xb2,0x39,0x1f,0xff]
+#CHECK: stcrw 4095(%r15) # encoding: [0xb2,0x39,0xff,0xff]
+
+ stcrw 0
+ stcrw 0(%r1)
+ stcrw 0(%r15)
+ stcrw 4095
+ stcrw 4095(%r1)
+ stcrw 4095(%r15)
+
+#CHECK: stctg %c0, %c0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x25]
+#CHECK: stctg %c0, %c15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x25]
+#CHECK: stctg %c14, %c15, 0 # encoding: [0xeb,0xef,0x00,0x00,0x00,0x25]
+#CHECK: stctg %c15, %c15, 0 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x25]
+#CHECK: stctg %c0, %c0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x25]
+#CHECK: stctg %c0, %c0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x25]
+#CHECK: stctg %c0, %c0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x25]
+#CHECK: stctg %c0, %c0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x25]
+#CHECK: stctg %c0, %c0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x25]
+#CHECK: stctg %c0, %c0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0x25]
+#CHECK: stctg %c0, %c0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0x25]
+#CHECK: stctg %c0, %c0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x25]
+#CHECK: stctg %c0, %c0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0x25]
+
+ stctg %c0,%c0,0
+ stctg %c0,%c15,0
+ stctg %c14,%c15,0
+ stctg %c15,%c15,0
+ stctg %c0,%c0,-524288
+ stctg %c0,%c0,-1
+ stctg %c0,%c0,0
+ stctg %c0,%c0,1
+ stctg %c0,%c0,524287
+ stctg %c0,%c0,0(%r1)
+ stctg %c0,%c0,0(%r15)
+ stctg %c0,%c0,524287(%r1)
+ stctg %c0,%c0,524287(%r15)
+
+#CHECK: stctl %c0, %c0, 0 # encoding: [0xb6,0x00,0x00,0x00]
+#CHECK: stctl %c0, %c15, 0 # encoding: [0xb6,0x0f,0x00,0x00]
+#CHECK: stctl %c14, %c15, 0 # encoding: [0xb6,0xef,0x00,0x00]
+#CHECK: stctl %c15, %c15, 0 # encoding: [0xb6,0xff,0x00,0x00]
+#CHECK: stctl %c0, %c0, 4095 # encoding: [0xb6,0x00,0x0f,0xff]
+#CHECK: stctl %c0, %c0, 1 # encoding: [0xb6,0x00,0x00,0x01]
+#CHECK: stctl %c0, %c0, 0(%r1) # encoding: [0xb6,0x00,0x10,0x00]
+#CHECK: stctl %c0, %c0, 0(%r15) # encoding: [0xb6,0x00,0xf0,0x00]
+#CHECK: stctl %c0, %c0, 4095(%r1) # encoding: [0xb6,0x00,0x1f,0xff]
+#CHECK: stctl %c0, %c0, 4095(%r15) # encoding: [0xb6,0x00,0xff,0xff]
+
+ stctl %c0,%c0,0
+ stctl %c0,%c15,0
+ stctl %c14,%c15,0
+ stctl %c15,%c15,0
+ stctl %c0,%c0,4095
+ stctl %c0,%c0,1
+ stctl %c0,%c0,0(%r1)
+ stctl %c0,%c0,0(%r15)
+ stctl %c0,%c0,4095(%r1)
+ stctl %c0,%c0,4095(%r15)
+
#CHECK: stcy %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x72]
#CHECK: stcy %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x72]
#CHECK: stcy %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x72]
@@ -13413,6 +14544,20 @@
stey %f0, 524287(%r15,%r1)
stey %f15, 0
+#CHECK: stfl 0 # encoding: [0xb2,0xb1,0x00,0x00]
+#CHECK: stfl 0(%r1) # encoding: [0xb2,0xb1,0x10,0x00]
+#CHECK: stfl 0(%r15) # encoding: [0xb2,0xb1,0xf0,0x00]
+#CHECK: stfl 4095 # encoding: [0xb2,0xb1,0x0f,0xff]
+#CHECK: stfl 4095(%r1) # encoding: [0xb2,0xb1,0x1f,0xff]
+#CHECK: stfl 4095(%r15) # encoding: [0xb2,0xb1,0xff,0xff]
+
+ stfl 0
+ stfl 0(%r1)
+ stfl 0(%r15)
+ stfl 4095
+ stfl 4095(%r1)
+ stfl 4095(%r15)
+
#CHECK: stfle 0 # encoding: [0xb2,0xb0,0x00,0x00]
#CHECK: stfle 0(%r1) # encoding: [0xb2,0xb0,0x10,0x00]
#CHECK: stfle 0(%r15) # encoding: [0xb2,0xb0,0xf0,0x00]
@@ -13575,6 +14720,20 @@
sthy %r0, 524287(%r15,%r1)
sthy %r15, 0
+#CHECK: stidp 0 # encoding: [0xb2,0x02,0x00,0x00]
+#CHECK: stidp 0(%r1) # encoding: [0xb2,0x02,0x10,0x00]
+#CHECK: stidp 0(%r15) # encoding: [0xb2,0x02,0xf0,0x00]
+#CHECK: stidp 4095 # encoding: [0xb2,0x02,0x0f,0xff]
+#CHECK: stidp 4095(%r1) # encoding: [0xb2,0x02,0x1f,0xff]
+#CHECK: stidp 4095(%r15) # encoding: [0xb2,0x02,0xff,0xff]
+
+ stidp 0
+ stidp 0(%r1)
+ stidp 0(%r15)
+ stidp 4095
+ stidp 4095(%r1)
+ stidp 4095(%r15)
+
#CHECK: stm %r0, %r0, 0 # encoding: [0x90,0x00,0x00,0x00]
#CHECK: stm %r0, %r15, 0 # encoding: [0x90,0x0f,0x00,0x00]
#CHECK: stm %r14, %r15, 0 # encoding: [0x90,0xef,0x00,0x00]
@@ -13681,6 +14840,52 @@
stmy %r0,%r0,524287(%r1)
stmy %r0,%r0,524287(%r15)
+#CHECK: stnsm 0, 0 # encoding: [0xac,0x00,0x00,0x00]
+#CHECK: stnsm 4095, 0 # encoding: [0xac,0x00,0x0f,0xff]
+#CHECK: stnsm 0, 255 # encoding: [0xac,0xff,0x00,0x00]
+#CHECK: stnsm 0(%r1), 42 # encoding: [0xac,0x2a,0x10,0x00]
+#CHECK: stnsm 0(%r15), 42 # encoding: [0xac,0x2a,0xf0,0x00]
+#CHECK: stnsm 4095(%r1), 42 # encoding: [0xac,0x2a,0x1f,0xff]
+#CHECK: stnsm 4095(%r15), 42 # encoding: [0xac,0x2a,0xff,0xff]
+
+ stnsm 0, 0
+ stnsm 4095, 0
+ stnsm 0, 255
+ stnsm 0(%r1), 42
+ stnsm 0(%r15), 42
+ stnsm 4095(%r1), 42
+ stnsm 4095(%r15), 42
+
+#CHECK: stosm 0, 0 # encoding: [0xad,0x00,0x00,0x00]
+#CHECK: stosm 4095, 0 # encoding: [0xad,0x00,0x0f,0xff]
+#CHECK: stosm 0, 255 # encoding: [0xad,0xff,0x00,0x00]
+#CHECK: stosm 0(%r1), 42 # encoding: [0xad,0x2a,0x10,0x00]
+#CHECK: stosm 0(%r15), 42 # encoding: [0xad,0x2a,0xf0,0x00]
+#CHECK: stosm 4095(%r1), 42 # encoding: [0xad,0x2a,0x1f,0xff]
+#CHECK: stosm 4095(%r15), 42 # encoding: [0xad,0x2a,0xff,0xff]
+
+ stosm 0, 0
+ stosm 4095, 0
+ stosm 0, 255
+ stosm 0(%r1), 42
+ stosm 0(%r15), 42
+ stosm 4095(%r1), 42
+ stosm 4095(%r15), 42
+
+#CHECK: stpt 0 # encoding: [0xb2,0x09,0x00,0x00]
+#CHECK: stpt 0(%r1) # encoding: [0xb2,0x09,0x10,0x00]
+#CHECK: stpt 0(%r15) # encoding: [0xb2,0x09,0xf0,0x00]
+#CHECK: stpt 4095 # encoding: [0xb2,0x09,0x0f,0xff]
+#CHECK: stpt 4095(%r1) # encoding: [0xb2,0x09,0x1f,0xff]
+#CHECK: stpt 4095(%r15) # encoding: [0xb2,0x09,0xff,0xff]
+
+ stpt 0
+ stpt 0(%r1)
+ stpt 0(%r15)
+ stpt 4095
+ stpt 4095(%r1)
+ stpt 4095(%r15)
+
#CHECK: stpq %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x8e]
#CHECK: stpq %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x8e]
#CHECK: stpq %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x8e]
@@ -13703,6 +14908,20 @@
stpq %r0, 524287(%r15,%r1)
stpq %r14, 0
+#CHECK: stpx 0 # encoding: [0xb2,0x11,0x00,0x00]
+#CHECK: stpx 0(%r1) # encoding: [0xb2,0x11,0x10,0x00]
+#CHECK: stpx 0(%r15) # encoding: [0xb2,0x11,0xf0,0x00]
+#CHECK: stpx 4095 # encoding: [0xb2,0x11,0x0f,0xff]
+#CHECK: stpx 4095(%r1) # encoding: [0xb2,0x11,0x1f,0xff]
+#CHECK: stpx 4095(%r15) # encoding: [0xb2,0x11,0xff,0xff]
+
+ stpx 0
+ stpx 0(%r1)
+ stpx 0(%r15)
+ stpx 4095
+ stpx 4095(%r1)
+ stpx 4095(%r15)
+
#CHECK: strag 0, 0 # encoding: [0xe5,0x02,0x00,0x00,0x00,0x00]
#CHECK: strag 0(%r1), 0(%r2) # encoding: [0xe5,0x02,0x10,0x00,0x20,0x00]
#CHECK: strag 160(%r1), 320(%r15) # encoding: [0xe5,0x02,0x10,0xa0,0xf1,0x40]
@@ -13828,6 +15047,54 @@
strvh %r0,524287(%r15,%r1)
strvh %r15,0
+#CHECK: stsch 0 # encoding: [0xb2,0x34,0x00,0x00]
+#CHECK: stsch 0(%r1) # encoding: [0xb2,0x34,0x10,0x00]
+#CHECK: stsch 0(%r15) # encoding: [0xb2,0x34,0xf0,0x00]
+#CHECK: stsch 4095 # encoding: [0xb2,0x34,0x0f,0xff]
+#CHECK: stsch 4095(%r1) # encoding: [0xb2,0x34,0x1f,0xff]
+#CHECK: stsch 4095(%r15) # encoding: [0xb2,0x34,0xff,0xff]
+
+ stsch 0
+ stsch 0(%r1)
+ stsch 0(%r15)
+ stsch 4095
+ stsch 4095(%r1)
+ stsch 4095(%r15)
+
+#CHECK: stsi 0 # encoding: [0xb2,0x7d,0x00,0x00]
+#CHECK: stsi 0(%r1) # encoding: [0xb2,0x7d,0x10,0x00]
+#CHECK: stsi 0(%r15) # encoding: [0xb2,0x7d,0xf0,0x00]
+#CHECK: stsi 4095 # encoding: [0xb2,0x7d,0x0f,0xff]
+#CHECK: stsi 4095(%r1) # encoding: [0xb2,0x7d,0x1f,0xff]
+#CHECK: stsi 4095(%r15) # encoding: [0xb2,0x7d,0xff,0xff]
+
+ stsi 0
+ stsi 0(%r1)
+ stsi 0(%r15)
+ stsi 4095
+ stsi 4095(%r1)
+ stsi 4095(%r15)
+
+#CHECK: stura %r0, %r0 # encoding: [0xb2,0x46,0x00,0x00]
+#CHECK: stura %r0, %r15 # encoding: [0xb2,0x46,0x00,0x0f]
+#CHECK: stura %r15, %r0 # encoding: [0xb2,0x46,0x00,0xf0]
+#CHECK: stura %r7, %r8 # encoding: [0xb2,0x46,0x00,0x78]
+
+ stura %r0,%r0
+ stura %r0,%r15
+ stura %r15,%r0
+ stura %r7,%r8
+
+#CHECK: sturg %r0, %r0 # encoding: [0xb9,0x25,0x00,0x00]
+#CHECK: sturg %r0, %r15 # encoding: [0xb9,0x25,0x00,0x0f]
+#CHECK: sturg %r15, %r0 # encoding: [0xb9,0x25,0x00,0xf0]
+#CHECK: sturg %r7, %r8 # encoding: [0xb9,0x25,0x00,0x78]
+
+ sturg %r0,%r0
+ sturg %r0,%r15
+ sturg %r15,%r0
+ sturg %r7,%r8
+
#CHECK: sty %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x50]
#CHECK: sty %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x50]
#CHECK: sty %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x50]
@@ -13970,6 +15237,28 @@
tam
+#CHECK: tar %a0, %r0 # encoding: [0xb2,0x4c,0x00,0x00]
+#CHECK: tar %a0, %r15 # encoding: [0xb2,0x4c,0x00,0x0f]
+#CHECK: tar %a15, %r0 # encoding: [0xb2,0x4c,0x00,0xf0]
+#CHECK: tar %a7, %r8 # encoding: [0xb2,0x4c,0x00,0x78]
+
+ tar %a0,%r0
+ tar %a0,%r15
+ tar %a15,%r0
+ tar %a7,%r8
+
+#CHECK: tb %r0, %r0 # encoding: [0xb2,0x2c,0x00,0x00]
+#CHECK: tb %r0, %r15 # encoding: [0xb2,0x2c,0x00,0x0f]
+#CHECK: tb %r15, %r0 # encoding: [0xb2,0x2c,0x00,0xf0]
+#CHECK: tb %r7, %r8 # encoding: [0xb2,0x2c,0x00,0x78]
+#CHECK: tb %r15, %r15 # encoding: [0xb2,0x2c,0x00,0xff]
+
+ tb %r0,%r0
+ tb %r0,%r15
+ tb %r15,%r0
+ tb %r7,%r8
+ tb %r15,%r15
+
#CHECK: tbdr %f0, 0, %f0 # encoding: [0xb3,0x51,0x00,0x00]
#CHECK: tbdr %f0, 0, %f15 # encoding: [0xb3,0x51,0x00,0x0f]
#CHECK: tbdr %f0, 15, %f0 # encoding: [0xb3,0x51,0xf0,0x00]
@@ -14272,6 +15561,42 @@
tp 0(16,%r1)
tp 0(16,%r15)
+#CHECK: tpi 0 # encoding: [0xb2,0x36,0x00,0x00]
+#CHECK: tpi 0(%r1) # encoding: [0xb2,0x36,0x10,0x00]
+#CHECK: tpi 0(%r15) # encoding: [0xb2,0x36,0xf0,0x00]
+#CHECK: tpi 4095 # encoding: [0xb2,0x36,0x0f,0xff]
+#CHECK: tpi 4095(%r1) # encoding: [0xb2,0x36,0x1f,0xff]
+#CHECK: tpi 4095(%r15) # encoding: [0xb2,0x36,0xff,0xff]
+
+ tpi 0
+ tpi 0(%r1)
+ tpi 0(%r15)
+ tpi 4095
+ tpi 4095(%r1)
+ tpi 4095(%r15)
+
+#CHECK: tprot 0, 0 # encoding: [0xe5,0x01,0x00,0x00,0x00,0x00]
+#CHECK: tprot 0(%r1), 0(%r2) # encoding: [0xe5,0x01,0x10,0x00,0x20,0x00]
+#CHECK: tprot 160(%r1), 320(%r15) # encoding: [0xe5,0x01,0x10,0xa0,0xf1,0x40]
+#CHECK: tprot 0(%r1), 4095 # encoding: [0xe5,0x01,0x10,0x00,0x0f,0xff]
+#CHECK: tprot 0(%r1), 4095(%r2) # encoding: [0xe5,0x01,0x10,0x00,0x2f,0xff]
+#CHECK: tprot 0(%r1), 4095(%r15) # encoding: [0xe5,0x01,0x10,0x00,0xff,0xff]
+#CHECK: tprot 0(%r1), 0 # encoding: [0xe5,0x01,0x10,0x00,0x00,0x00]
+#CHECK: tprot 0(%r15), 0 # encoding: [0xe5,0x01,0xf0,0x00,0x00,0x00]
+#CHECK: tprot 4095(%r1), 0 # encoding: [0xe5,0x01,0x1f,0xff,0x00,0x00]
+#CHECK: tprot 4095(%r15), 0 # encoding: [0xe5,0x01,0xff,0xff,0x00,0x00]
+
+ tprot 0, 0
+ tprot 0(%r1), 0(%r2)
+ tprot 160(%r1), 320(%r15)
+ tprot 0(%r1), 4095
+ tprot 0(%r1), 4095(%r2)
+ tprot 0(%r1), 4095(%r15)
+ tprot 0(%r1), 0
+ tprot 0(%r15), 0
+ tprot 4095(%r1), 0
+ tprot 4095(%r15), 0
+
#CHECK: tr 0(1), 0 # encoding: [0xdc,0x00,0x00,0x00,0x00,0x00]
#CHECK: tr 0(1), 0(%r1) # encoding: [0xdc,0x00,0x00,0x00,0x10,0x00]
#CHECK: tr 0(1), 0(%r15) # encoding: [0xdc,0x00,0x00,0x00,0xf0,0x00]
@@ -14298,6 +15623,74 @@
tr 0(256,%r1), 0
tr 0(256,%r15), 0
+#CHECK: trace %r0, %r0, 0 # encoding: [0x99,0x00,0x00,0x00]
+#CHECK: trace %r0, %r15, 0 # encoding: [0x99,0x0f,0x00,0x00]
+#CHECK: trace %r14, %r15, 0 # encoding: [0x99,0xef,0x00,0x00]
+#CHECK: trace %r15, %r15, 0 # encoding: [0x99,0xff,0x00,0x00]
+#CHECK: trace %r0, %r0, 4095 # encoding: [0x99,0x00,0x0f,0xff]
+#CHECK: trace %r0, %r0, 1 # encoding: [0x99,0x00,0x00,0x01]
+#CHECK: trace %r0, %r0, 0(%r1) # encoding: [0x99,0x00,0x10,0x00]
+#CHECK: trace %r0, %r0, 0(%r15) # encoding: [0x99,0x00,0xf0,0x00]
+#CHECK: trace %r0, %r0, 4095(%r1) # encoding: [0x99,0x00,0x1f,0xff]
+#CHECK: trace %r0, %r0, 4095(%r15) # encoding: [0x99,0x00,0xff,0xff]
+
+ trace %r0,%r0,0
+ trace %r0,%r15,0
+ trace %r14,%r15,0
+ trace %r15,%r15,0
+ trace %r0,%r0,4095
+ trace %r0,%r0,1
+ trace %r0,%r0,0(%r1)
+ trace %r0,%r0,0(%r15)
+ trace %r0,%r0,4095(%r1)
+ trace %r0,%r0,4095(%r15)
+
+#CHECK: tracg %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x0f]
+#CHECK: tracg %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0x0f]
+#CHECK: tracg %r14, %r15, 0 # encoding: [0xeb,0xef,0x00,0x00,0x00,0x0f]
+#CHECK: tracg %r15, %r15, 0 # encoding: [0xeb,0xff,0x00,0x00,0x00,0x0f]
+#CHECK: tracg %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0x0f]
+#CHECK: tracg %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0x0f]
+#CHECK: tracg %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0x0f]
+#CHECK: tracg %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0x0f]
+#CHECK: tracg %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0x0f]
+#CHECK: tracg %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0x0f]
+#CHECK: tracg %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0x0f]
+#CHECK: tracg %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0x0f]
+#CHECK: tracg %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0x0f]
+
+ tracg %r0,%r0,0
+ tracg %r0,%r15,0
+ tracg %r14,%r15,0
+ tracg %r15,%r15,0
+ tracg %r0,%r0,-524288
+ tracg %r0,%r0,-1
+ tracg %r0,%r0,0
+ tracg %r0,%r0,1
+ tracg %r0,%r0,524287
+ tracg %r0,%r0,0(%r1)
+ tracg %r0,%r0,0(%r15)
+ tracg %r0,%r0,524287(%r1)
+ tracg %r0,%r0,524287(%r15)
+
+#CHECK: trap2 # encoding: [0x01,0xff]
+
+ trap2
+
+#CHECK: trap4 0 # encoding: [0xb2,0xff,0x00,0x00]
+#CHECK: trap4 0(%r1) # encoding: [0xb2,0xff,0x10,0x00]
+#CHECK: trap4 0(%r15) # encoding: [0xb2,0xff,0xf0,0x00]
+#CHECK: trap4 4095 # encoding: [0xb2,0xff,0x0f,0xff]
+#CHECK: trap4 4095(%r1) # encoding: [0xb2,0xff,0x1f,0xff]
+#CHECK: trap4 4095(%r15) # encoding: [0xb2,0xff,0xff,0xff]
+
+ trap4 0
+ trap4 0(%r1)
+ trap4 0(%r15)
+ trap4 4095
+ trap4 4095(%r1)
+ trap4 4095(%r15)
+
#CHECK: tre %r0, %r0 # encoding: [0xb2,0xa5,0x00,0x00]
#CHECK: tre %r0, %r15 # encoding: [0xb2,0xa5,0x00,0x0f]
#CHECK: tre %r14, %r0 # encoding: [0xb2,0xa5,0x00,0xe0]
@@ -14458,6 +15851,20 @@
ts 4095(%r1)
ts 4095(%r15)
+#CHECK: tsch 0 # encoding: [0xb2,0x35,0x00,0x00]
+#CHECK: tsch 0(%r1) # encoding: [0xb2,0x35,0x10,0x00]
+#CHECK: tsch 0(%r15) # encoding: [0xb2,0x35,0xf0,0x00]
+#CHECK: tsch 4095 # encoding: [0xb2,0x35,0x0f,0xff]
+#CHECK: tsch 4095(%r1) # encoding: [0xb2,0x35,0x1f,0xff]
+#CHECK: tsch 4095(%r15) # encoding: [0xb2,0x35,0xff,0xff]
+
+ tsch 0
+ tsch 0(%r1)
+ tsch 0(%r15)
+ tsch 4095
+ tsch 4095(%r1)
+ tsch 4095(%r15)
+
#CHECK: unpk 0(1), 0(1) # encoding: [0xf3,0x00,0x00,0x00,0x00,0x00]
#CHECK: unpk 0(1), 0(1,%r1) # encoding: [0xf3,0x00,0x00,0x00,0x10,0x00]
#CHECK: unpk 0(1), 0(1,%r15) # encoding: [0xf3,0x00,0x00,0x00,0xf0,0x00]
@@ -14682,6 +16089,10 @@
xr %r15,%r0
xr %r7,%r8
+#CHECK: xsch # encoding: [0xb2,0x76,0x00,0x00]
+
+ xsch
+
#CHECK: xy %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x57]
#CHECK: xy %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x57]
#CHECK: xy %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x57]
diff --git a/test/MC/SystemZ/regs-bad.s b/test/MC/SystemZ/regs-bad.s
index 37c83dd4b8ffa..f4cdb69821d2e 100644
--- a/test/MC/SystemZ/regs-bad.s
+++ b/test/MC/SystemZ/regs-bad.s
@@ -8,18 +8,24 @@
#CHECK: error: invalid operand for instruction
#CHECK: lr %a0,%r1
#CHECK: error: invalid operand for instruction
+#CHECK: lr %c0,%r1
+#CHECK: error: invalid operand for instruction
#CHECK: lr %r0,%f1
#CHECK: error: invalid operand for instruction
#CHECK: lr %r0,%a1
#CHECK: error: invalid operand for instruction
+#CHECK: lr %r0,%c1
+#CHECK: error: invalid operand for instruction
#CHECK: lr %r0,0
#CHECK: error: invalid operand for instruction
#CHECK: lr %r0,0(%r1)
lr %f0,%r1
lr %a0,%r1
+ lr %c0,%r1
lr %r0,%f1
lr %r0,%a1
+ lr %r0,%c1
lr %r0,0
lr %r0,0(%r1)
@@ -30,18 +36,24 @@
#CHECK: error: invalid operand for instruction
#CHECK: lgr %a0,%r1
#CHECK: error: invalid operand for instruction
+#CHECK: lgr %c0,%r1
+#CHECK: error: invalid operand for instruction
#CHECK: lgr %r0,%f1
#CHECK: error: invalid operand for instruction
#CHECK: lgr %r0,%a1
#CHECK: error: invalid operand for instruction
+#CHECK: lgr %r0,%c1
+#CHECK: error: invalid operand for instruction
#CHECK: lgr %r0,0
#CHECK: error: invalid operand for instruction
#CHECK: lgr %r0,0(%r1)
lgr %f0,%r1
lgr %a0,%r1
+ lgr %c0,%r1
lgr %r0,%f1
lgr %r0,%a1
+ lgr %r0,%c1
lgr %r0,0
lgr %r0,0(%r1)
@@ -68,10 +80,14 @@
#CHECK: error: invalid operand for instruction
#CHECK: dlr %a0,%r1
#CHECK: error: invalid operand for instruction
+#CHECK: dlr %c0,%r1
+#CHECK: error: invalid operand for instruction
#CHECK: dlr %r0,%f1
#CHECK: error: invalid operand for instruction
#CHECK: dlr %r0,%a1
#CHECK: error: invalid operand for instruction
+#CHECK: dlr %r0,%c1
+#CHECK: error: invalid operand for instruction
#CHECK: dlr %r0,0
#CHECK: error: invalid operand for instruction
#CHECK: dlr %r0,0(%r1)
@@ -86,8 +102,10 @@
dlr %r15,%r0
dlr %f0,%r1
dlr %a0,%r1
+ dlr %c0,%r1
dlr %r0,%f1
dlr %r0,%a1
+ dlr %r0,%c1
dlr %r0,0
dlr %r0,0(%r1)
@@ -98,18 +116,24 @@
#CHECK: error: invalid operand for instruction
#CHECK: ler %a0,%f1
#CHECK: error: invalid operand for instruction
+#CHECK: ler %c0,%f1
+#CHECK: error: invalid operand for instruction
#CHECK: ler %f0,%r1
#CHECK: error: invalid operand for instruction
#CHECK: ler %f0,%a1
#CHECK: error: invalid operand for instruction
+#CHECK: ler %f0,%c1
+#CHECK: error: invalid operand for instruction
#CHECK: ler %f0,0
#CHECK: error: invalid operand for instruction
#CHECK: ler %f0,0(%r1)
ler %r0,%f1
ler %a0,%f1
+ ler %c0,%f1
ler %f0,%r1
ler %f0,%a1
+ ler %f0,%c1
ler %f0,0
ler %f0,0(%r1)
@@ -120,18 +144,24 @@
#CHECK: error: invalid operand for instruction
#CHECK: ldr %a0,%f1
#CHECK: error: invalid operand for instruction
+#CHECK: ldr %c0,%f1
+#CHECK: error: invalid operand for instruction
#CHECK: ldr %f0,%r1
#CHECK: error: invalid operand for instruction
#CHECK: ldr %f0,%a1
#CHECK: error: invalid operand for instruction
+#CHECK: ldr %f0,%c1
+#CHECK: error: invalid operand for instruction
#CHECK: ldr %f0,0
#CHECK: error: invalid operand for instruction
#CHECK: ldr %f0,0(%r1)
ldr %r0,%f1
ldr %a0,%f1
+ ldr %c0,%f1
ldr %f0,%r1
ldr %f0,%a1
+ ldr %f0,%c1
ldr %f0,0
ldr %f0,0(%r1)
@@ -158,10 +188,14 @@
#CHECK: error: invalid operand for instruction
#CHECK: lxr %a0,%f1
#CHECK: error: invalid operand for instruction
+#CHECK: lxr %c0,%f1
+#CHECK: error: invalid operand for instruction
#CHECK: lxr %f0,%r1
#CHECK: error: invalid operand for instruction
#CHECK: lxr %f0,%a1
#CHECK: error: invalid operand for instruction
+#CHECK: lxr %f0,%c1
+#CHECK: error: invalid operand for instruction
#CHECK: lxr %f0,0
#CHECK: error: invalid operand for instruction
#CHECK: lxr %f0,0(%r1)
@@ -176,8 +210,10 @@
lxr %f0,%f15
lxr %r0,%f1
lxr %a0,%f1
+ lxr %c0,%f1
lxr %f0,%r1
lxr %f0,%a1
+ lxr %f0,%c1
lxr %f0,0
lxr %f0,0(%r1)
@@ -188,15 +224,33 @@
#CHECK: error: invalid operand for instruction
#CHECK: ear %r0,%f0
#CHECK: error: invalid operand for instruction
+#CHECK: ear %r0,%c0
+#CHECK: error: invalid operand for instruction
#CHECK: ear %r0,0
#CHECK: error: invalid operand for instruction
#CHECK: ear %r0,0(%r1)
ear %r0,%r0
ear %r0,%f0
+ ear %r0,%c0
ear %r0,0
ear %r0,0(%r1)
+# Test control register operands
+#
+#CHECK: error: invalid operand for instruction
+#CHECK: lctl %c0,%r0,0
+#CHECK: error: invalid operand for instruction
+#CHECK: lctl %c0,%f0,0
+#CHECK: error: invalid operand for instruction
+#CHECK: lctl %c0,%a0,0
+#CHECK: error: invalid operand for instruction
+#CHECK: lctl %c0,0,0
+#CHECK: error: invalid operand for instruction
+#CHECK: lctl %c0,0(%r1),0
+
+ lctl %c0,%r0,0
+ lctl %c0,%f0,0
+ lctl %c0,%a0,0
+ lctl %c0,0,0
+ lctl %c0,0(%r1),0
+
.cfi_startproc
# Test general register parsing, with no predetermined class in mind.
@@ -212,9 +266,9 @@
#CHECK: error: invalid register
#CHECK: .cfi_offset %a,0
#CHECK: error: invalid register
-#CHECK: .cfi_offset %0,0
+#CHECK: .cfi_offset %c,0
#CHECK: error: invalid register
-#CHECK: .cfi_offset %c0,0
+#CHECK: .cfi_offset %0,0
#CHECK: error: invalid register
#CHECK: .cfi_offset %r16,0
#CHECK: error: invalid register
@@ -222,6 +276,8 @@
#CHECK: error: invalid register
#CHECK: .cfi_offset %a16,0
#CHECK: error: invalid register
+#CHECK: .cfi_offset %c16,0
+#CHECK: error: invalid register
#CHECK: .cfi_offset %reef,0
#CHECK: error: invalid register
#CHECK: .cfi_offset %arid,0
@@ -231,11 +287,12 @@
.cfi_offset %r,0
.cfi_offset %f,0
.cfi_offset %a,0
+ .cfi_offset %c,0
.cfi_offset %0,0
- .cfi_offset %c0,0
.cfi_offset %r16,0
.cfi_offset %f16,0
.cfi_offset %a16,0
+ .cfi_offset %c16,0
.cfi_offset %reef,0
.cfi_offset %arid,0
diff --git a/test/MC/SystemZ/regs-good.s b/test/MC/SystemZ/regs-good.s
index 4047579bcbb37..c20301133d870 100644
--- a/test/MC/SystemZ/regs-good.s
+++ b/test/MC/SystemZ/regs-good.s
@@ -118,6 +118,25 @@
cpya %a12,%a13
cpya %a14,%a15
+#CHECK: lctl %c0, %c1, 0 # encoding: [0xb7,0x01,0x00,0x00]
+#CHECK: lctl %c2, %c3, 0 # encoding: [0xb7,0x23,0x00,0x00]
+#CHECK: lctl %c4, %c5, 0 # encoding: [0xb7,0x45,0x00,0x00]
+#CHECK: lctl %c6, %c7, 0 # encoding: [0xb7,0x67,0x00,0x00]
+#CHECK: lctl %c8, %c9, 0 # encoding: [0xb7,0x89,0x00,0x00]
+#CHECK: lctl %c10, %c11, 0 # encoding: [0xb7,0xab,0x00,0x00]
+#CHECK: lctl %c12, %c13, 0 # encoding: [0xb7,0xcd,0x00,0x00]
+#CHECK: lctl %c14, %c15, 0 # encoding: [0xb7,0xef,0x00,0x00]
+
+ lctl %c0,%c1,0
+ lctl %c2,%c3,0
+ lctl %c4,%c5,0
+ lctl %c6,%c7,0
+ lctl %c8,%c9,0
+ lctl %c10,%c11,0
+ lctl %c12,%c13,0
+ lctl %c14,%c15,0
+
#CHECK: .cfi_offset %r0, 0
#CHECK: .cfi_offset %r1, 8
#CHECK: .cfi_offset %r2, 16
@@ -166,6 +185,22 @@
#CHECK: .cfi_offset %a13, 308
#CHECK: .cfi_offset %a14, 312
#CHECK: .cfi_offset %a15, 316
+#CHECK: .cfi_offset %c0, 318
+#CHECK: .cfi_offset %c1, 326
+#CHECK: .cfi_offset %c2, 334
+#CHECK: .cfi_offset %c3, 342
+#CHECK: .cfi_offset %c4, 350
+#CHECK: .cfi_offset %c5, 358
+#CHECK: .cfi_offset %c6, 366
+#CHECK: .cfi_offset %c7, 374
+#CHECK: .cfi_offset %c8, 382
+#CHECK: .cfi_offset %c9, 390
+#CHECK: .cfi_offset %c10, 398
+#CHECK: .cfi_offset %c11, 406
+#CHECK: .cfi_offset %c12, 414
+#CHECK: .cfi_offset %c13, 422
+#CHECK: .cfi_offset %c14, 430
+#CHECK: .cfi_offset %c15, 438
.cfi_startproc
.cfi_offset %r0,0
@@ -216,4 +251,20 @@
.cfi_offset %a13,308
.cfi_offset %a14,312
.cfi_offset %a15,316
+ .cfi_offset %c0,318
+ .cfi_offset %c1,326
+ .cfi_offset %c2,334
+ .cfi_offset %c3,342
+ .cfi_offset %c4,350
+ .cfi_offset %c5,358
+ .cfi_offset %c6,366
+ .cfi_offset %c7,374
+ .cfi_offset %c8,382
+ .cfi_offset %c9,390
+ .cfi_offset %c10,398
+ .cfi_offset %c11,406
+ .cfi_offset %c12,414
+ .cfi_offset %c13,422
+ .cfi_offset %c14,430
+ .cfi_offset %c15,438
.cfi_endproc
diff --git a/test/MC/WebAssembly/unnamed-data.ll b/test/MC/WebAssembly/unnamed-data.ll
index 77a7c08f6594a..fd985088c1d27 100644
--- a/test/MC/WebAssembly/unnamed-data.ll
+++ b/test/MC/WebAssembly/unnamed-data.ll
@@ -7,47 +7,53 @@
@b = global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str2, i32 0, i32 0), align 8
-; CHECK: - Type: GLOBAL
-; CHECK: Globals:
-; CHECK: - Type: I32
-; CHECK: Mutable: false
-; CHECK: InitExpr:
-; CHECK: Opcode: I32_CONST
-; CHECK: Value: 0
-; CHECK: - Type: I32
-; CHECK: Mutable: false
-; CHECK: InitExpr:
-; CHECK: Opcode: I32_CONST
-; CHECK: Value: 6
-; CHECK: - Type: I32
-; CHECK: Mutable: false
-; CHECK: InitExpr:
-; CHECK: Opcode: I32_CONST
-; CHECK: Value: 16
-; CHECK: - Type: I32
-; CHECK: Mutable: false
-; CHECK: InitExpr:
-; CHECK: Opcode: I32_CONST
-; CHECK: Value: 24
-; CHECK: - Type: EXPORT
-; CHECK: Exports:
-; CHECK: - Name: a
-; CHECK: Kind: GLOBAL
-; CHECK: Index: 2
-; CHECK: - Name: b
-; CHECK: Kind: GLOBAL
-; CHECK: Index: 3
-; CHECK: - Type: DATA
-; CHECK: Relocations:
-; CHECK: - Type: R_WEBASSEMBLY_GLOBAL_ADDR_I32
-; CHECK: Index: 0
-; CHECK: Offset: 0x00000016
-; CHECK: - Type: R_WEBASSEMBLY_GLOBAL_ADDR_I32
-; CHECK: Index: 1
-; CHECK: Offset: 0x0000001E
-; CHECK: Segments:
-; CHECK: - Index: 0
-; CHECK: Offset:
-; CHECK: Opcode: I32_CONST
-; CHECK: Value: 0
-; CHECK: Content: 68656C6C6F00776F726C640000000000000000000000000006000000
+; CHECK: - Type: GLOBAL
+; CHECK-NEXT: Globals:
+; CHECK-NEXT: - Type: I32
+; CHECK-NEXT: Mutable: false
+; CHECK-NEXT: InitExpr:
+; CHECK-NEXT: Opcode: I32_CONST
+; CHECK-NEXT: Value: 0
+; CHECK-NEXT: - Type: I32
+; CHECK-NEXT: Mutable: false
+; CHECK-NEXT: InitExpr:
+; CHECK-NEXT: Opcode: I32_CONST
+; CHECK-NEXT: Value: 6
+; CHECK-NEXT: - Type: I32
+; CHECK-NEXT: Mutable: false
+; CHECK-NEXT: InitExpr:
+; CHECK-NEXT: Opcode: I32_CONST
+; CHECK-NEXT: Value: 16
+; CHECK-NEXT: - Type: I32
+; CHECK-NEXT: Mutable: false
+; CHECK-NEXT: InitExpr:
+; CHECK-NEXT: Opcode: I32_CONST
+; CHECK-NEXT: Value: 24
+; CHECK-NEXT: - Type: EXPORT
+; CHECK-NEXT: Exports:
+; CHECK-NEXT: - Name: a
+; CHECK-NEXT: Kind: GLOBAL
+; CHECK-NEXT: Index: 2
+; CHECK-NEXT: - Name: b
+; CHECK-NEXT: Kind: GLOBAL
+; CHECK-NEXT: Index: 3
+; CHECK-NEXT: - Type: DATA
+; CHECK-NEXT: Relocations:
+; CHECK-NEXT: - Type: R_WEBASSEMBLY_GLOBAL_ADDR_I32
+; CHECK-NEXT: Index: 0
+; CHECK-NEXT: Offset: 0x00000016
+; CHECK-NEXT: - Type: R_WEBASSEMBLY_GLOBAL_ADDR_I32
+; CHECK-NEXT: Index: 1
+; CHECK-NEXT: Offset: 0x0000001E
+; CHECK-NEXT: Segments:
+; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: Offset:
+; CHECK-NEXT: Opcode: I32_CONST
+; CHECK-NEXT: Value: 0
+; CHECK-NEXT: Content: 68656C6C6F00776F726C640000000000000000000000000006000000
+; CHECK-NEXT: - Type: CUSTOM
+; CHECK-NEXT: Name: linking
+; CHECK-NEXT: DataSize: 28
+; CHECK-NEXT: DataAlignment: 8
+; CHECK-NEXT: SymbolInfo:
+; CHECK-NEXT: ...
diff --git a/test/MC/WebAssembly/weak-alias.ll b/test/MC/WebAssembly/weak-alias.ll
new file mode 100644
index 0000000000000..6e2b8631d2b17
--- /dev/null
+++ b/test/MC/WebAssembly/weak-alias.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple wasm32-unknown-unknown-wasm -filetype=obj %s -o - | obj2yaml | FileCheck %s
+
+; foo_alias() is a weak alias of the function foo().
+; This generates two exports of the same function, one of them weak.
+
+@foo_alias = weak hidden alias i32 (...), bitcast (i32 ()* @foo to i32 (...)*)
+
+define hidden i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+; CHECK: - Type: EXPORT
+; CHECK-NEXT: Exports:
+; CHECK-NEXT: - Name: foo
+; CHECK-NEXT: Kind: FUNCTION
+; CHECK-NEXT: Index: 0
+; CHECK-NEXT: - Name: foo_alias
+; CHECK-NEXT: Kind: FUNCTION
+; CHECK-NEXT: Index: 0
+
+
+; CHECK: - Type: CUSTOM
+; CHECK-NEXT: Name: name
+; CHECK-NEXT: FunctionNames:
+; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: Name: foo
+; CHECK-NEXT: - Type: CUSTOM
+; CHECK-NEXT: Name: linking
+; CHECK-NEXT: DataSize: 0
+; CHECK-NEXT: DataAlignment: 0
+; CHECK-NEXT: SymbolInfo:
+; CHECK-NEXT: - Name: foo_alias
+; CHECK-NEXT: Flags: 1
+; CHECK-NEXT: ...
diff --git a/test/MC/WebAssembly/weak.ll b/test/MC/WebAssembly/weak.ll
new file mode 100644
index 0000000000000..1bc06fec5910a
--- /dev/null
+++ b/test/MC/WebAssembly/weak.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple wasm32-unknown-unknown-wasm -filetype=obj %s -o - | obj2yaml | FileCheck %s
+
+; Weak external data reference
+@weak_external_data = extern_weak global i32, align 4
+
+; Weak function definition
+define weak hidden i32 @weak_function() local_unnamed_addr #0 {
+entry:
+ %0 = load i32, i32* @weak_external_data, align 4
+ ret i32 %0
+}
+
+; CHECK: - Type: IMPORT
+; CHECK-NEXT: Imports:
+; CHECK-NEXT: - Module: env
+; CHECK-NEXT: Field: weak_external_data
+; CHECK-NEXT: Kind: GLOBAL
+; CHECK-NEXT: GlobalType: I32
+; CHECK-NEXT: GlobalMutable: false
+
+
+; CHECK: - Type: CUSTOM
+; CHECK-NEXT: Name: name
+; CHECK-NEXT: FunctionNames:
+; CHECK-NEXT: - Index: 0
+; CHECK-NEXT: Name: weak_function
+; CHECK-NEXT: - Type: CUSTOM
+; CHECK-NEXT: Name: linking
+; CHECK-NEXT: DataSize: 0
+; CHECK-NEXT: DataAlignment: 0
+; CHECK-NEXT: SymbolInfo:
+; CHECK-NEXT: - Name: weak_external_data
+; CHECK-NEXT: Flags: 1
+; CHECK-NEXT: - Name: weak_function
+; CHECK-NEXT: Flags: 1
+; CHECK-NEXT: ...
diff --git a/test/MC/X86/intel-syntax-bitwise-ops.s b/test/MC/X86/intel-syntax-bitwise-ops.s
index 6d4df609c061a..a0b25800f976c 100644
--- a/test/MC/X86/intel-syntax-bitwise-ops.s
+++ b/test/MC/X86/intel-syntax-bitwise-ops.s
@@ -56,3 +56,20 @@
add eax, 6 XOR 3
// CHECK: addl $5, %eax
add eax, 6 XOR 3 shl 1 SHR 1
+// CHECK: movl $-9, %eax
+ mov eax, not(1 shl 3)
+// CHECK: movl $-2, %eax
+ mov eax, ~(0x8 shr 3)
+// CHECK: movl $-4, %eax
+ mov eax, not(1 or 3)
+// CHECK: movl $-2, %eax
+ mov eax, -(1 xor 3)
+// CHECK: movl $-2, %eax
+ mov eax, not(1 and 3)
+// CHECK: movl $3, %eax
+ mov eax, not(not 3)
+// CHECK: movl $-3, %eax
+ mov eax, ~(5 mod 3)
+// CHECK: movl $-2, %eax
+ mov eax, (-5 mod 3)
+
diff --git a/test/MC/X86/signed-coff-pcrel.s b/test/MC/X86/signed-coff-pcrel.s
new file mode 100644
index 0000000000000..768947bbf8031
--- /dev/null
+++ b/test/MC/X86/signed-coff-pcrel.s
@@ -0,0 +1,12 @@
+// RUN: llvm-mc -triple i686-unknown-windows-msvc -filetype obj -o %t.o %s
+// RUN: llvm-objdump -r %t.o | FileCheck %s
+
+// CHECK: 00000004 IMAGE_REL_I386_REL32 twop32
+
+ .section .rdata,"rd"
+twop32:
+ .quad 0x41f0000000000000
+
+ .text
+0:
+ mulsd twop32-0b(%eax), %xmm1
diff --git a/test/Object/X86/irsymtab-asm.ll b/test/Object/X86/irsymtab-asm.ll
new file mode 100644
index 0000000000000..487dc37b6571d
--- /dev/null
+++ b/test/Object/X86/irsymtab-asm.ll
@@ -0,0 +1,17 @@
+; Check that we correctly handle the case where we have inline asm and the
+; target is not registered. In this case we shouldn't emit an irsymtab.
+
+; RUN: llvm-as -o %t %s
+; RUN: llvm-bcanalyzer -dump %t | FileCheck --check-prefix=AS %s
+
+; AS-NOT: <SYMTAB_BLOCK
+
+; RUN: opt -o %t2 %s
+; RUN: llvm-bcanalyzer -dump %t2 | FileCheck --check-prefix=OPT %s
+
+; OPT: <SYMTAB_BLOCK
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm "ret"
diff --git a/test/Object/X86/irsymtab-bad-alias.ll b/test/Object/X86/irsymtab-bad-alias.ll
new file mode 100644
index 0000000000000..c54436d592192
--- /dev/null
+++ b/test/Object/X86/irsymtab-bad-alias.ll
@@ -0,0 +1,15 @@
+; Check that we do not create an irsymtab for modules with malformed IR.
+
+; RUN: opt -o %t %s
+; RUN: llvm-bcanalyzer -dump %t | FileCheck %s
+
+; CHECK-NOT: <SYMTAB_BLOCK
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@g1 = global i32 1
+@g2 = global i32 2
+
+@a = alias i32, inttoptr(i32 sub (i32 ptrtoint (i32* @g1 to i32),
+ i32 ptrtoint (i32* @g2 to i32)) to i32*)
diff --git a/test/Object/X86/irsymtab.ll b/test/Object/X86/irsymtab.ll
new file mode 100644
index 0000000000000..053756d4fc6b8
--- /dev/null
+++ b/test/Object/X86/irsymtab.ll
@@ -0,0 +1,33 @@
+; RUN: env LLVM_OVERRIDE_PRODUCER=producer opt -o %t %s
+; RUN: llvm-bcanalyzer -dump -show-binary-blobs %t | FileCheck --check-prefix=BCA %s
+
+; Same producer, does not require upgrade.
+; RUN: env LLVM_OVERRIDE_PRODUCER=producer llvm-lto2 dump-symtab %t | FileCheck --check-prefix=SYMTAB %s
+
+; Different producer, requires upgrade.
+; RUN: env LLVM_OVERRIDE_PRODUCER=consumer llvm-lto2 dump-symtab %t | FileCheck --check-prefix=SYMTAB %s
+
+; BCA: <SYMTAB_BLOCK
+; Version stored at offset 0.
+; BCA-NEXT: <BLOB abbrevid=4/> blob data = '\x00\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00D\x00\x00\x00\x01\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00P\x00\x00\x00\x02\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x0E\x00\x00\x00\x18\x00\x00\x00&\x00\x00\x00\x0B\x00\x00\x001\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\xFF\xFF\xFF\xFF\x00$\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\xFF\xFF\xFF\xFF\x08$\x00\x00'
+; BCA-NEXT: </SYMTAB_BLOCK>
+; BCA-NEXT: <STRTAB_BLOCK
+; BCA-NEXT: <BLOB abbrevid=4/> blob data = 'foobarproducerx86_64-unknown-linux-gnuirsymtab.ll'
+; BCA-NEXT: </STRTAB_BLOCK>
+
+; SYMTAB: version: 0
+; SYMTAB-NEXT: producer: producer
+; SYMTAB-NEXT: target triple: x86_64-unknown-linux-gnu
+; SYMTAB-NEXT: source filename: irsymtab.ll
+; SYMTAB-NEXT: D------X foo
+; SYMTAB-NEXT: DU-----X bar
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+source_filename = "irsymtab.ll"
+
+define void @foo() {
+ ret void
+}
+
+declare void @bar()
diff --git a/test/Object/X86/yaml-elf-x86-rel-broken.yaml b/test/Object/X86/yaml-elf-x86-rel-broken.yaml
new file mode 100644
index 0000000000000..edd5dbce1236d
--- /dev/null
+++ b/test/Object/X86/yaml-elf-x86-rel-broken.yaml
@@ -0,0 +1,29 @@
+# RUN: yaml2obj %s > %t
+# RUN: obj2yaml %t | FileCheck %s
+
+# CHECK: Relocations:
+# CHECK-NEXT: - Offset:
+# CHECK-NEXT: Symbol:
+# CHECK-NEXT: Type: 0x000000FF
+
+!ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_386
+Sections:
+ - Type: SHT_PROGBITS
+ Name: .text
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ AddressAlign: 0x04
+ Content: 0000000000000000
+ - Type: SHT_REL
+ Name: .rel.text
+ Link: .symtab
+ Info: .text
+ AddressAlign: 0x04
+ Relocations:
+ - Offset: 0
+ Symbol: main
+ Type: 0xFF
diff --git a/test/ObjectYAML/wasm/weak_symbols.yaml b/test/ObjectYAML/wasm/weak_symbols.yaml
index 0ae8c9bec2a27..ab80c1e502904 100644
--- a/test/ObjectYAML/wasm/weak_symbols.yaml
+++ b/test/ObjectYAML/wasm/weak_symbols.yaml
@@ -3,6 +3,19 @@
FileHeader:
Version: 0x00000001
Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 0 ]
+ - Type: GLOBAL
+ Globals:
+ - Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 1
- Type: EXPORT
Exports:
- Name: function_export
@@ -10,9 +23,11 @@ Sections:
Index: 1
- Name: global_export
Kind: GLOBAL
- Index: 2
+ Index: 0
- Type: CUSTOM
Name: linking
+ DataSize: 10
+ DataAlignment: 2
SymbolInfo:
- Name: function_export
Flags: 1
@@ -30,9 +45,11 @@ Sections:
# CHECK: Index: 1
# CHECK: - Name: global_export
# CHECK: Kind: GLOBAL
-# CHECK: Index: 2
+# CHECK: Index: 0
# CHECK: - Type: CUSTOM
# CHECK: Name: linking
+# CHECK: DataSize: 10
+# CHECK: DataAlignment: 2
# CHECK: SymbolInfo:
# CHECK: - Name: function_export
# CHECK: Flags: 1
diff --git a/test/Other/new-pm-defaults.ll b/test/Other/new-pm-defaults.ll
index c5d10a0a67e34..fbecb34aa4b7c 100644
--- a/test/Other/new-pm-defaults.ll
+++ b/test/Other/new-pm-defaults.ll
@@ -74,6 +74,7 @@
; CHECK-O-NEXT: Starting llvm::Function pass manager run.
; CHECK-O-NEXT: Running pass: SROA
; CHECK-O-NEXT: Running pass: EarlyCSEPass
+; CHECK-O-NEXT: Running analysis: MemorySSAAnalysis
; CHECK-O-NEXT: Running pass: SpeculativeExecutionPass
; CHECK-O-NEXT: Running pass: JumpThreadingPass
; CHECK-O-NEXT: Running analysis: LazyValueAnalysis
diff --git a/test/Other/new-pm-thinlto-defaults.ll b/test/Other/new-pm-thinlto-defaults.ll
index 52f475b0397d0..f5625d96d703e 100644
--- a/test/Other/new-pm-thinlto-defaults.ll
+++ b/test/Other/new-pm-thinlto-defaults.ll
@@ -9,19 +9,19 @@
;
; Prelink pipelines:
; RUN: opt -disable-verify -debug-pass-manager \
-; RUN: -passes='thinlto-pre-link<O1>' -S %s 2>&1 \
+; RUN: -passes='thinlto-pre-link<O1>,name-anon-globals' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-O1,CHECK-PRELINK-O,CHECK-PRELINK-O1
; RUN: opt -disable-verify -debug-pass-manager \
-; RUN: -passes='thinlto-pre-link<O2>' -S %s 2>&1 \
+; RUN: -passes='thinlto-pre-link<O2>,name-anon-globals' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-O2,CHECK-PRELINK-O,CHECK-PRELINK-O2
; RUN: opt -disable-verify -debug-pass-manager \
-; RUN: -passes='thinlto-pre-link<O3>' -S %s 2>&1 \
+; RUN: -passes='thinlto-pre-link<O3>,name-anon-globals' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-O3,CHECK-PRELINK-O,CHECK-PRELINK-O3
; RUN: opt -disable-verify -debug-pass-manager \
-; RUN: -passes='thinlto-pre-link<Os>' -S %s 2>&1 \
+; RUN: -passes='thinlto-pre-link<Os>,name-anon-globals' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-Os,CHECK-PRELINK-O,CHECK-PRELINK-Os
; RUN: opt -disable-verify -debug-pass-manager \
-; RUN: -passes='thinlto-pre-link<Oz>' -S %s 2>&1 \
+; RUN: -passes='thinlto-pre-link<Oz>,name-anon-globals' -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-Oz,CHECK-PRELINK-O,CHECK-PRELINK-Oz
;
; Postlink pipelines:
@@ -90,6 +90,7 @@
; CHECK-O-NEXT: Starting llvm::Function pass manager run.
; CHECK-O-NEXT: Running pass: SROA
; CHECK-O-NEXT: Running pass: EarlyCSEPass
+; CHECK-O-NEXT: Running analysis: MemorySSAAnalysis
; CHECK-O-NEXT: Running pass: SpeculativeExecutionPass
; CHECK-O-NEXT: Running pass: JumpThreadingPass
; CHECK-O-NEXT: Running analysis: LazyValueAnalysis
@@ -153,7 +154,6 @@
; CHECK-O-NEXT: Finished CGSCC pass manager run.
; CHECK-O-NEXT: Finished llvm::Module pass manager run.
; CHECK-PRELINK-O-NEXT: Running pass: GlobalOptPass
-; CHECK-PRELINK-O-NEXT: Running pass: NameAnonGlobalPass
; CHECK-POSTLINK-O-NEXT: Running pass: PassManager<{{.*}}Module{{.*}}>
; CHECK-POSTLINK-O-NEXT: Starting llvm::Module pass manager run.
; CHECK-POSTLINK-O-NEXT: Running pass: GlobalOptPass
@@ -187,6 +187,7 @@
; CHECK-POSTLINK-O-NEXT: Running pass: ConstantMergePass
; CHECK-POSTLINK-O-NEXT: Finished llvm::Module pass manager run.
; CHECK-O-NEXT: Finished llvm::Module pass manager run.
+; CHECK-PRELINK-O-NEXT: Running pass: NameAnonGlobalPass
; CHECK-O-NEXT: Running pass: PrintModulePass
; Make sure we get the IR back out without changes when we print the module.
diff --git a/test/ThinLTO/X86/autoupgrade.ll b/test/ThinLTO/X86/autoupgrade.ll
index cbbe833d262ab..2188d031c4396 100644
--- a/test/ThinLTO/X86/autoupgrade.ll
+++ b/test/ThinLTO/X86/autoupgrade.ll
@@ -10,7 +10,7 @@
; RUN: | llvm-bcanalyzer -dump | FileCheck %s
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0i8'
+; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0i8{{.*}}'
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
diff --git a/test/Transforms/BBVectorize/X86/cmp-types.ll b/test/Transforms/BBVectorize/X86/cmp-types.ll
deleted file mode 100644
index fc1da1b0c609b..0000000000000
--- a/test/Transforms/BBVectorize/X86/cmp-types.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -S | FileCheck %s
-
-%"struct.btSoftBody" = type { float, float, float*, i8 }
-
-define void @test1(%"struct.btSoftBody"* %n1, %"struct.btSoftBody"* %n2) uwtable align 2 {
-entry:
- %tobool15 = icmp ne %"struct.btSoftBody"* %n1, null
- %cond16 = zext i1 %tobool15 to i32
- %tobool21 = icmp ne %"struct.btSoftBody"* %n2, null
- %cond22 = zext i1 %tobool21 to i32
- ret void
-; CHECK-LABEL: @test1(
-}
-
diff --git a/test/Transforms/BBVectorize/X86/loop1.ll b/test/Transforms/BBVectorize/X86/loop1.ll
deleted file mode 100644
index a533713609a79..0000000000000
--- a/test/Transforms/BBVectorize/X86/loop1.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -basicaa -loop-unroll -unroll-partial-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
-; The second check covers the use of alias analysis (with loop unrolling).
-
-define void @test1(double* noalias %out, double* noalias %in1, double* noalias %in2) nounwind uwtable {
-entry:
- br label %for.body
-; CHECK-LABEL: @test1(
-; CHECK-UNRL-LABEL: @test1(
-
-for.body: ; preds = %for.body, %entry
- %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
- %0 = load double, double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
- %1 = load double, double* %arrayidx2, align 8
- %mul = fmul double %0, %0
- %mul3 = fmul double %0, %1
- %add = fadd double %mul, %mul3
- %add4 = fadd double %1, %1
- %add5 = fadd double %add4, %0
- %mul6 = fmul double %0, %add5
- %add7 = fadd double %add, %mul6
- %mul8 = fmul double %1, %1
- %add9 = fadd double %0, %0
- %add10 = fadd double %add9, %0
- %mul11 = fmul double %mul8, %add10
- %add12 = fadd double %add7, %mul11
- %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
- store double %add12, double* %arrayidx14, align 8
- %indvars.iv.next = add i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, 10
- br i1 %exitcond, label %for.end, label %for.body
-; CHECK: insertelement
-; CHECK-NEXT: insertelement
-; CHECK-NEXT: fadd <2 x double>
-; CHECK-NEXT: insertelement
-; CHECK-NEXT: shufflevector
-; CHECK-NEXT: fadd <2 x double>
-; CHECK-NEXT: insertelement
-; CHECK-NEXT: fmul <2 x double>
-
-; CHECK-UNRL: %mul = fmul <2 x double> %2, %2
-; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
-; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
-; CHECK-UNRL: %add4 = fadd <2 x double> %3, %3
-; CHECK-UNRL: %add5 = fadd <2 x double> %add4, %2
-; CHECK-UNRL: %mul6 = fmul <2 x double> %2, %add5
-; CHECK-UNRL: %add7 = fadd <2 x double> %add, %mul6
-; CHECK-UNRL: %mul8 = fmul <2 x double> %3, %3
-; CHECK-UNRL: %add9 = fadd <2 x double> %2, %2
-; CHECK-UNRL: %add10 = fadd <2 x double> %add9, %2
-; CHECK-UNRL: %mul11 = fmul <2 x double> %mul8, %add10
-; CHECK-UNRL: %add12 = fadd <2 x double> %add7, %mul11
-
-for.end: ; preds = %for.body
- ret void
-}
diff --git a/test/Transforms/BBVectorize/X86/pr15289.ll b/test/Transforms/BBVectorize/X86/pr15289.ll
deleted file mode 100644
index a383a260fafd9..0000000000000
--- a/test/Transforms/BBVectorize/X86/pr15289.ll
+++ /dev/null
@@ -1,95 +0,0 @@
-; RUN: opt < %s -basicaa -bb-vectorize -disable-output
-; This is a bugpoint-reduced test case. It did not always assert, but does reproduce the bug
-; and running under valgrind (or some similar tool) will catch the error.
-
-target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin12.2.0"
-
-%0 = type { [10 x { float, float }], [10 x { float, float }], [10 x { float, float }], [10 x { float, float }], [10 x { float, float }] }
-%1 = type { [10 x [8 x i8]] }
-%2 = type { i64, i64 }
-%3 = type { [10 x i64], i64, i64, i64, i64, i64 }
-%4 = type { i64, i64, i64, i64, i64, i64 }
-%5 = type { [10 x i64] }
-%6 = type { [10 x float], [10 x float], [10 x float], [10 x float] }
-%struct.__st_parameter_dt.1.3.5.7 = type { %struct.__st_parameter_common.0.2.4.6, i64, i64*, i64*, i8*, i8*, i32, i32, i8*, i8*, i32, i32, i8*, [256 x i8], i32*, i64, i8*, i32, i32, i8*, i8*, i32, i32, i8*, i8*, i32, i32, i8*, i8*, i32, [4 x i8] }
-%struct.__st_parameter_common.0.2.4.6 = type { i32, i32, i8*, i32, i32, i8*, i32* }
-
-@cctenso_ = external unnamed_addr global %0, align 32
-@ctenso_ = external unnamed_addr global %1, align 32
-@i_dim_ = external unnamed_addr global %2, align 16
-@itenso1_ = external unnamed_addr global %3, align 32
-@itenso2_ = external unnamed_addr global %4, align 32
-@ltenso_ = external unnamed_addr global %5, align 32
-@rtenso_ = external unnamed_addr global %6, align 32
-@.cst = external unnamed_addr constant [8 x i8], align 8
-@.cst1 = external unnamed_addr constant [3 x i8], align 8
-@.cst2 = external unnamed_addr constant [29 x i8], align 8
-@.cst3 = external unnamed_addr constant [32 x i8], align 64
-
-define void @cart_to_dc2y_(double* noalias nocapture %xx, double* noalias nocapture %yy, double* noalias nocapture %zz, [5 x { double, double }]* noalias nocapture %c2ten) nounwind uwtable {
-entry:
- %0 = fmul double undef, undef
- %1 = fmul double undef, undef
- %2 = fadd double undef, undef
- %3 = fmul double undef, 0x3FE8B8B76E3E9919
- %4 = fsub double %0, %1
- %5 = fsub double -0.000000e+00, undef
- %6 = fmul double undef, undef
- %7 = fmul double %4, %6
- %8 = fmul double undef, 2.000000e+00
- %9 = fmul double %8, undef
- %10 = fmul double undef, %9
- %11 = fmul double %10, undef
- %12 = fsub double undef, %7
- %13 = fmul double %3, %12
- %14 = fmul double %3, undef
- %15 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 0
- store double %13, double* %15, align 8
- %16 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 1
- %17 = fmul double undef, %8
- %18 = fmul double %17, undef
- %19 = fmul double undef, %18
- %20 = fadd double undef, undef
- %21 = fmul double %3, %19
- %22 = fsub double -0.000000e+00, %21
- %23 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 0
- store double %22, double* %23, align 8
- %24 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 1
- %25 = fmul double undef, 0x3FE42F601A8C6794
- %26 = fmul double undef, 2.000000e+00
- %27 = fsub double %26, %0
- %28 = fmul double %6, undef
- %29 = fsub double undef, %28
- %30 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 0
- store double undef, double* %30, align 8
- %31 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 1
- %32 = fmul double undef, %17
- %33 = fmul double undef, %17
- %34 = fmul double undef, %32
- %35 = fmul double undef, %33
- %36 = fsub double undef, %35
- %37 = fmul double %3, %34
- %38 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 0
- store double %37, double* %38, align 8
- %39 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 1
- %40 = fmul double undef, %8
- %41 = fmul double undef, %40
- %42 = fmul double undef, %41
- %43 = fsub double undef, %42
- %44 = fmul double %3, %43
- %45 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 0
- store double %13, double* %45, align 8
- %46 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 1
- %47 = fsub double -0.000000e+00, %14
- store double %47, double* %16, align 8
- store double undef, double* %24, align 8
- store double -0.000000e+00, double* %31, align 8
- store double undef, double* %39, align 8
- store double undef, double* %46, align 8
- ret void
-}
-
-attributes #0 = { nounwind uwtable }
-attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind }
diff --git a/test/Transforms/BBVectorize/X86/sh-rec.ll b/test/Transforms/BBVectorize/X86/sh-rec.ll
deleted file mode 100644
index 2cb9dbded2242..0000000000000
--- a/test/Transforms/BBVectorize/X86/sh-rec.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -S | FileCheck %s
-
-define void @ptoa() nounwind uwtable {
-entry:
- %call = call i8* @malloc() nounwind
- br i1 undef, label %return, label %if.end10
-
-if.end10: ; preds = %entry
- %incdec.ptr = getelementptr inbounds i8, i8* %call, i64 undef
- %call17 = call i32 @ptou() nounwind
- %incdec.ptr26.1 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -2
- store i8 undef, i8* %incdec.ptr26.1, align 1
- %div27.1 = udiv i32 %call17, 100
- %rem.2 = urem i32 %div27.1, 10
- %add2230.2 = or i32 %rem.2, 48
- %conv25.2 = trunc i32 %add2230.2 to i8
- %incdec.ptr26.2 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -3
- store i8 %conv25.2, i8* %incdec.ptr26.2, align 1
- %incdec.ptr26.3 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -4
- store i8 undef, i8* %incdec.ptr26.3, align 1
- %div27.3 = udiv i32 %call17, 10000
- %rem.4 = urem i32 %div27.3, 10
- %add2230.4 = or i32 %rem.4, 48
- %conv25.4 = trunc i32 %add2230.4 to i8
- %incdec.ptr26.4 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -5
- store i8 %conv25.4, i8* %incdec.ptr26.4, align 1
- %div27.4 = udiv i32 %call17, 100000
- %rem.5 = urem i32 %div27.4, 10
- %add2230.5 = or i32 %rem.5, 48
- %conv25.5 = trunc i32 %add2230.5 to i8
- %incdec.ptr26.5 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -6
- store i8 %conv25.5, i8* %incdec.ptr26.5, align 1
- %incdec.ptr26.6 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -7
- store i8 0, i8* %incdec.ptr26.6, align 1
- %incdec.ptr26.7 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -8
- store i8 undef, i8* %incdec.ptr26.7, align 1
- %div27.7 = udiv i32 %call17, 100000000
- %rem.8 = urem i32 %div27.7, 10
- %add2230.8 = or i32 %rem.8, 48
- %conv25.8 = trunc i32 %add2230.8 to i8
- %incdec.ptr26.8 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -9
- store i8 %conv25.8, i8* %incdec.ptr26.8, align 1
- unreachable
-
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: @ptoa(
-}
-
-declare noalias i8* @malloc() nounwind
-
-declare i32 @ptou()
diff --git a/test/Transforms/BBVectorize/X86/sh-rec2.ll b/test/Transforms/BBVectorize/X86/sh-rec2.ll
deleted file mode 100644
index d7a004c213842..0000000000000
--- a/test/Transforms/BBVectorize/X86/sh-rec2.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -basicaa -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -S | FileCheck %s
-
-%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352 = type { [280 x i16], i16, i64, i32, [8 x i16], [2 x [8 x i16]], i16, i16, [9 x i16], i16, i8, i8 }
-
-define void @gsm_encode(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352* %s, i16* %source, i8* %c) nounwind uwtable {
-entry:
- %xmc = alloca [52 x i16], align 16
- %arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
- call void @Gsm_Coder(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352* %s, i16* %source, i16* undef, i16* null, i16* undef, i16* undef, i16* undef, i16* %arraydecay5) nounwind
- %incdec.ptr136 = getelementptr inbounds i8, i8* %c, i64 10
- %incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
- store i8 0, i8* %incdec.ptr136, align 1
- %arrayidx162 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 11
- %0 = load i16, i16* %arrayidx162, align 2
- %conv1631 = trunc i16 %0 to i8
- %and164 = shl i8 %conv1631, 3
- %shl165 = and i8 %and164, 56
- %incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
- store i8 %shl165, i8* %incdec.ptr157, align 1
- %1 = load i16, i16* inttoptr (i64 2 to i16*), align 2
- %conv1742 = trunc i16 %1 to i8
- %and175 = shl i8 %conv1742, 1
- %incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
- store i8 %and175, i8* %incdec.ptr172, align 1
- %incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
- store i8 0, i8* %incdec.ptr183, align 1
- %arrayidx214 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 15
- %incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
- store i8 0, i8* %incdec.ptr199, align 1
- %2 = load i16, i16* %arrayidx214, align 2
- %conv2223 = trunc i16 %2 to i8
- %and223 = shl i8 %conv2223, 6
- %incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
- store i8 %and223, i8* %incdec.ptr220, align 1
- %arrayidx240 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 19
- %3 = load i16, i16* %arrayidx240, align 2
- %conv2414 = trunc i16 %3 to i8
- %and242 = shl i8 %conv2414, 2
- %shl243 = and i8 %and242, 28
- %incdec.ptr251 = getelementptr inbounds i8, i8* %c, i64 17
- store i8 %shl243, i8* %incdec.ptr235, align 1
- %incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
- store i8 0, i8* %incdec.ptr251, align 1
- %arrayidx282 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 25
- %4 = load i16, i16* %arrayidx282, align 2
- %conv2835 = trunc i16 %4 to i8
- %and284 = and i8 %conv2835, 7
- %incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
- store i8 %and284, i8* %incdec.ptr272, align 1
- %incdec.ptr298 = getelementptr inbounds i8, i8* %c, i64 20
- store i8 0, i8* %incdec.ptr287, align 1
- %incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
- store i8 0, i8* %incdec.ptr298, align 1
- %arrayidx319 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 26
- %5 = load i16, i16* %arrayidx319, align 4
- %conv3206 = trunc i16 %5 to i8
- %and321 = shl i8 %conv3206, 4
- %shl322 = and i8 %and321, 112
- %incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
- store i8 %shl322, i8* %incdec.ptr314, align 1
- %arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
- %6 = load i16, i16* %arrayidx340, align 2
- %conv3417 = trunc i16 %6 to i8
- %and342 = shl i8 %conv3417, 3
- %shl343 = and i8 %and342, 56
- %incdec.ptr350 = getelementptr inbounds i8, i8* %c, i64 23
- store i8 %shl343, i8* %incdec.ptr335, align 1
- %incdec.ptr366 = getelementptr inbounds i8, i8* %c, i64 24
- store i8 0, i8* %incdec.ptr350, align 1
- %arrayidx381 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 36
- %incdec.ptr387 = getelementptr inbounds i8, i8* %c, i64 25
- store i8 0, i8* %incdec.ptr366, align 1
- %7 = load i16, i16* %arrayidx381, align 8
- %conv3898 = trunc i16 %7 to i8
- %and390 = shl i8 %conv3898, 6
- store i8 %and390, i8* %incdec.ptr387, align 1
- unreachable
-; CHECK-LABEL: @gsm_encode(
-}
-
-declare void @Gsm_Coder(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352*, i16*, i16*, i16*, i16*, i16*, i16*, i16*)
-
-declare void @llvm.trap() noreturn nounwind
diff --git a/test/Transforms/BBVectorize/X86/sh-rec3.ll b/test/Transforms/BBVectorize/X86/sh-rec3.ll
deleted file mode 100644
index 2096deb08a90e..0000000000000
--- a/test/Transforms/BBVectorize/X86/sh-rec3.ll
+++ /dev/null
@@ -1,170 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -basicaa -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -S | FileCheck %s
-
-%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565 = type { [280 x i16], i16, i64, i32, [8 x i16], [2 x [8 x i16]], i16, i16, [9 x i16], i16, i8, i8 }
-
-define void @gsm_encode(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i8* %c) nounwind uwtable {
-entry:
- %LARc28 = alloca [2 x i64], align 16
- %LARc28.sub = getelementptr inbounds [2 x i64], [2 x i64]* %LARc28, i64 0, i64 0
- %tmpcast = bitcast [2 x i64]* %LARc28 to [8 x i16]*
- %Nc = alloca [4 x i16], align 2
- %Mc = alloca [4 x i16], align 2
- %bc = alloca [4 x i16], align 2
- %xmc = alloca [52 x i16], align 16
- %arraydecay = bitcast [2 x i64]* %LARc28 to i16*
- %arraydecay1 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 0
- %arraydecay2 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 0
- %arraydecay3 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 0
- %arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
- call void @Gsm_Coder(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i16* %arraydecay, i16* %arraydecay1, i16* %arraydecay2, i16* %arraydecay3, i16* undef, i16* %arraydecay5) nounwind
- %0 = load i64, i64* %LARc28.sub, align 16
- %1 = trunc i64 %0 to i32
- %conv1 = lshr i32 %1, 2
- %and = and i32 %conv1, 15
- %or = or i32 %and, 208
- %conv6 = trunc i32 %or to i8
- %incdec.ptr = getelementptr inbounds i8, i8* %c, i64 1
- store i8 %conv6, i8* %c, align 1
- %conv84 = trunc i64 %0 to i8
- %and9 = shl i8 %conv84, 6
- %incdec.ptr15 = getelementptr inbounds i8, i8* %c, i64 2
- store i8 %and9, i8* %incdec.ptr, align 1
- %2 = lshr i64 %0, 50
- %shr226.tr = trunc i64 %2 to i8
- %conv25 = and i8 %shr226.tr, 7
- %incdec.ptr26 = getelementptr inbounds i8, i8* %c, i64 3
- store i8 %conv25, i8* %incdec.ptr15, align 1
- %incdec.ptr42 = getelementptr inbounds i8, i8* %c, i64 4
- store i8 0, i8* %incdec.ptr26, align 1
- %arrayidx52 = getelementptr inbounds [8 x i16], [8 x i16]* %tmpcast, i64 0, i64 7
- %3 = load i16, i16* %arrayidx52, align 2
- %conv537 = trunc i16 %3 to i8
- %and54 = and i8 %conv537, 7
- %incdec.ptr57 = getelementptr inbounds i8, i8* %c, i64 5
- store i8 %and54, i8* %incdec.ptr42, align 1
- %incdec.ptr68 = getelementptr inbounds i8, i8* %c, i64 6
- store i8 0, i8* %incdec.ptr57, align 1
- %4 = load i16, i16* %arraydecay3, align 2
- %conv748 = trunc i16 %4 to i8
- %and75 = shl i8 %conv748, 5
- %shl76 = and i8 %and75, 96
- %incdec.ptr84 = getelementptr inbounds i8, i8* %c, i64 7
- store i8 %shl76, i8* %incdec.ptr68, align 1
- %arrayidx94 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 1
- %5 = load i16, i16* %arrayidx94, align 2
- %conv959 = trunc i16 %5 to i8
- %and96 = shl i8 %conv959, 1
- %shl97 = and i8 %and96, 14
- %or103 = or i8 %shl97, 1
- %incdec.ptr105 = getelementptr inbounds i8, i8* %c, i64 8
- store i8 %or103, i8* %incdec.ptr84, align 1
- %arrayidx115 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 4
- %6 = bitcast i16* %arrayidx115 to i32*
- %7 = load i32, i32* %6, align 8
- %conv11610 = trunc i32 %7 to i8
- %and117 = and i8 %conv11610, 7
- %incdec.ptr120 = getelementptr inbounds i8, i8* %c, i64 9
- store i8 %and117, i8* %incdec.ptr105, align 1
- %8 = lshr i32 %7, 16
- %and12330 = shl nuw nsw i32 %8, 5
- %and123 = trunc i32 %and12330 to i8
- %incdec.ptr136 = getelementptr inbounds i8, i8* %c, i64 10
- store i8 %and123, i8* %incdec.ptr120, align 1
- %incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
- store i8 0, i8* %incdec.ptr136, align 1
- %incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
- store i8 0, i8* %incdec.ptr157, align 1
- %arrayidx173 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 1
- %9 = load i16, i16* %arrayidx173, align 2
- %conv17412 = zext i16 %9 to i32
- %and175 = shl nuw nsw i32 %conv17412, 1
- %arrayidx177 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 1
- %10 = load i16, i16* %arrayidx177, align 2
- %conv17826 = zext i16 %10 to i32
- %shr17913 = lshr i32 %conv17826, 1
- %and180 = and i32 %shr17913, 1
- %or181 = or i32 %and175, %and180
- %conv182 = trunc i32 %or181 to i8
- %incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
- store i8 %conv182, i8* %incdec.ptr172, align 1
- %arrayidx188 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 1
- %11 = load i16, i16* %arrayidx188, align 2
- %conv18914 = trunc i16 %11 to i8
- %and190 = shl i8 %conv18914, 5
- %shl191 = and i8 %and190, 96
- %incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
- store i8 %shl191, i8* %incdec.ptr183, align 1
- %arrayidx209 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 14
- %12 = load i16, i16* %arrayidx209, align 4
- %conv21015 = trunc i16 %12 to i8
- %and211 = shl i8 %conv21015, 1
- %shl212 = and i8 %and211, 14
- %or218 = or i8 %shl212, 1
- %incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
- store i8 %or218, i8* %incdec.ptr199, align 1
- %arrayidx225 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 16
- %13 = bitcast i16* %arrayidx225 to i64*
- %14 = load i64, i64* %13, align 16
- %conv22616 = trunc i64 %14 to i8
- %and227 = shl i8 %conv22616, 3
- %shl228 = and i8 %and227, 56
- %incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
- store i8 %shl228, i8* %incdec.ptr220, align 1
- %15 = lshr i64 %14, 32
- %and23832 = shl nuw nsw i64 %15, 5
- %and238 = trunc i64 %and23832 to i8
- %incdec.ptr251 = getelementptr inbounds i8, i8* %c, i64 17
- store i8 %and238, i8* %incdec.ptr235, align 1
- %arrayidx266 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 23
- %incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
- store i8 0, i8* %incdec.ptr251, align 1
- %16 = load i16, i16* %arrayidx266, align 2
- %conv27418 = trunc i16 %16 to i8
- %and275 = shl i8 %conv27418, 6
- %incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
- store i8 %and275, i8* %incdec.ptr272, align 1
- %arrayidx288 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 2
- %17 = load i16, i16* %arrayidx288, align 2
- %conv28919 = zext i16 %17 to i32
- %and290 = shl nuw nsw i32 %conv28919, 1
- %arrayidx292 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 2
- %18 = load i16, i16* %arrayidx292, align 2
- %conv29327 = zext i16 %18 to i32
- %shr29420 = lshr i32 %conv29327, 1
- %and295 = and i32 %shr29420, 1
- %or296 = or i32 %and290, %and295
- %conv297 = trunc i32 %or296 to i8
- %incdec.ptr298 = getelementptr inbounds i8, i8* %c, i64 20
- store i8 %conv297, i8* %incdec.ptr287, align 1
- %conv30021 = trunc i16 %18 to i8
- %and301 = shl i8 %conv30021, 7
- %incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
- store i8 %and301, i8* %incdec.ptr298, align 1
- %incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
- store i8 0, i8* %incdec.ptr314, align 1
- %arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
- %19 = load i16, i16* %arrayidx340, align 2
- %conv34122 = trunc i16 %19 to i8
- %and342 = shl i8 %conv34122, 3
- %shl343 = and i8 %and342, 56
- %incdec.ptr350 = getelementptr inbounds i8, i8* %c, i64 23
- store i8 %shl343, i8* %incdec.ptr335, align 1
- %arrayidx355 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 32
- %20 = bitcast i16* %arrayidx355 to i32*
- %21 = load i32, i32* %20, align 16
- %conv35623 = shl i32 %21, 2
- %shl358 = and i32 %conv35623, 28
- %22 = lshr i32 %21, 17
- %and363 = and i32 %22, 3
- %or364 = or i32 %shl358, %and363
- %conv365 = trunc i32 %or364 to i8
- store i8 %conv365, i8* %incdec.ptr350, align 1
- unreachable
-; CHECK-LABEL: @gsm_encode(
-}
-
-declare void @Gsm_Coder(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565*, i16*, i16*, i16*, i16*, i16*, i16*, i16*)
-
-declare void @llvm.trap() noreturn nounwind
diff --git a/test/Transforms/BBVectorize/X86/sh-types.ll b/test/Transforms/BBVectorize/X86/sh-types.ll
deleted file mode 100644
index fbff2fb86eb0e..0000000000000
--- a/test/Transforms/BBVectorize/X86/sh-types.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -S | FileCheck %s
-
-define <4 x float> @test7(<4 x float> %A1, <4 x float> %B1, double %C1, double %C2, double %D1, double %D2) {
- %A2 = shufflevector <4 x float> %A1, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
- %B2 = shufflevector <4 x float> %B1, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
- %X1 = shufflevector <4 x float> %A2, <4 x float> undef, <2 x i32> <i32 0, i32 1>
- %X2 = shufflevector <4 x float> %B2, <4 x float> undef, <2 x i32> <i32 2, i32 3>
- %Y1 = shufflevector <2 x float> %X1, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
- %Y2 = shufflevector <2 x float> %X2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
-
- %M1 = fsub double %C1, %D1
- %M2 = fsub double %C2, %D2
- %N1 = fmul double %M1, %C1
- %N2 = fmul double %M2, %C2
- %Z1 = fadd double %N1, %D1
- %Z2 = fadd double %N2, %D2
-
- %R = fmul <4 x float> %Y1, %Y2
- ret <4 x float> %R
-; CHECK-LABEL: @test7(
-; CHECK-NOT: <8 x float>
-; CHECK: ret <4 x float>
-}
-
diff --git a/test/Transforms/BBVectorize/X86/simple-int.ll b/test/Transforms/BBVectorize/X86/simple-int.ll
deleted file mode 100644
index 7842ec85b6c81..0000000000000
--- a/test/Transforms/BBVectorize/X86/simple-int.ll
+++ /dev/null
@@ -1,79 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
-
-declare double @llvm.fma.f64(double, double, double)
-declare double @llvm.fmuladd.f64(double, double, double)
-declare double @llvm.cos.f64(double)
-declare double @llvm.powi.f64(double, i32)
-
-; Basic depth-3 chain with fma
-define double @test1(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1)
- %Y2 = call double @llvm.fma.f64(double %X2, double %A2, double %C2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test1(
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with fmuladd
-define double @test1a(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.fmuladd.f64(double %X1, double %A1, double %C1)
- %Y2 = call double @llvm.fmuladd.f64(double %X2, double %A2, double %C2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test1a(
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with cos
-define double @test2(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.cos.f64(double %X1)
- %Y2 = call double @llvm.cos.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test2(
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with powi
-define double @test3(double %A1, double %A2, double %B1, double %B2, i32 %P) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.powi.f64(double %X1, i32 %P)
- %Y2 = call double @llvm.powi.f64(double %X2, i32 %P)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test3(
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with powi (different powers: should not vectorize)
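-; (A note on the mechanism, an assumption about the pass rather than
-; something stated in this file: the i32 power operand of @llvm.powi stays
-; scalar, so two calls can only be fused when their power operands are
-; identical; %P vs. %P2 = %P + 1 below blocks the pairing.)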
-define double @test4(double %A1, double %A2, double %B1, double %B2, i32 %P) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %P2 = add i32 %P, 1
- %Y1 = call double @llvm.powi.f64(double %X1, i32 %P)
- %Y2 = call double @llvm.powi.f64(double %X2, i32 %P2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test4(
-; CHECK: ret double %R
-}
-
diff --git a/test/Transforms/BBVectorize/X86/simple-ldstr.ll b/test/Transforms/BBVectorize/X86/simple-ldstr.ll
deleted file mode 100644
index 2c05f30d08188..0000000000000
--- a/test/Transforms/BBVectorize/X86/simple-ldstr.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
-
-; Simple 3-pair chain with loads and stores
-define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
-entry:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4
- store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
- store double %mul5, double* %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test1(
-; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
-; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
-; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %0 = bitcast double* %c to <2 x double>*
-; CHECK: store <2 x double> %mul, <2 x double>* %0, align 8
-; CHECK: ret void
-}
-
diff --git a/test/Transforms/BBVectorize/X86/simple.ll b/test/Transforms/BBVectorize/X86/simple.ll
deleted file mode 100644
index a11e3090f2057..0000000000000
--- a/test/Transforms/BBVectorize/X86/simple.ll
+++ /dev/null
@@ -1,120 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
-
-; Basic depth-3 chain
-define double @test1(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test1(
-; CHECK: fsub <2 x double>
-; CHECK: fmul <2 x double>
-; CHECK: fadd <2 x double>
-; CHECK: extract
-; CHECK: extract
-; CHECK: ret double %R
-}
-
-; Basic chain
-define double @test1a(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %W1 = fadd double %Y1, %Z1
- %W2 = fadd double %Y2, %Z2
- %V1 = fadd double %W1, %Z1
- %V2 = fadd double %W2, %Z2
- %Q1 = fadd double %W1, %V1
- %Q2 = fadd double %W2, %V2
- %S1 = fadd double %W1, %Q1
- %S2 = fadd double %W2, %Q2
- %R = fmul double %S1, %S2
- ret double %R
-; CHECK-LABEL: @test1a(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %W1 = fadd <2 x double> %Y1, %Z1
-; CHECK: %V1 = fadd <2 x double> %W1, %Z1
-; CHECK: %Q1 = fadd <2 x double> %W1, %V1
-; CHECK: %S1 = fadd <2 x double> %W1, %Q1
-; CHECK: %S1.v.r1 = extractelement <2 x double> %S1, i32 0
-; CHECK: %S1.v.r2 = extractelement <2 x double> %S1, i32 1
-; CHECK: %R = fmul double %S1.v.r1, %S1.v.r2
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain (last pair permuted)
-define double @test2(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %Z1 = fadd double %Y2, %B1
- %Z2 = fadd double %Y1, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test2(
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: fsub <2 x double>
-; CHECK: fmul <2 x double>
-; CHECK: ret double %R
-}
-
-; Basic depth-4 chain (internal permutation)
-define double @test4(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %Z1 = fadd double %Y2, %B1
- %Z2 = fadd double %Y1, %B2
- %W1 = fadd double %Y2, %Z1
- %W2 = fadd double %Y1, %Z2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test4(
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: fsub <2 x double>
-; CHECK: fmul <2 x double>
-; CHECK: insertelement
-; CHECK: insertelement
-; CHECK: fadd <2 x double>
-; CHECK: ret double %R
-}
-
-; Basic chain with shuffles
-define <8 x i8> @test6(<8 x i8> %A1, <8 x i8> %A2, <8 x i8> %B1, <8 x i8> %B2) {
- %X1 = sub <8 x i8> %A1, %B1
- %X2 = sub <8 x i8> %A2, %B2
- %Y1 = mul <8 x i8> %X1, %A1
- %Y2 = mul <8 x i8> %X2, %A2
- %Z1 = add <8 x i8> %Y1, %B1
- %Z2 = add <8 x i8> %Y2, %B2
- %Q1 = shufflevector <8 x i8> %Z1, <8 x i8> %Z2, <8 x i32> <i32 15, i32 8, i32 6, i32 1, i32 13, i32 10, i32 4, i32 3>
- %Q2 = shufflevector <8 x i8> %Z2, <8 x i8> %Z2, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 4, i32 4, i32 1>
- %R = mul <8 x i8> %Q1, %Q2
- ret <8 x i8> %R
-; CHECK-LABEL: @test6(
-; CHECK-NOT: sub <16 x i8>
-; CHECK: ret <8 x i8>
-}
-
diff --git a/test/Transforms/BBVectorize/X86/vs-cast.ll b/test/Transforms/BBVectorize/X86/vs-cast.ll
deleted file mode 100644
index 0c666b11976cd..0000000000000
--- a/test/Transforms/BBVectorize/X86/vs-cast.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -S | FileCheck %s
-
-define void @main() nounwind uwtable {
-entry:
- %0 = bitcast <2 x i64> undef to i128
- %1 = bitcast <2 x i64> undef to i128
- ret void
-; CHECK-LABEL: @main(
-}
-
diff --git a/test/Transforms/BBVectorize/X86/wr-aliases.ll b/test/Transforms/BBVectorize/X86/wr-aliases.ll
deleted file mode 100644
index e34414988f32a..0000000000000
--- a/test/Transforms/BBVectorize/X86/wr-aliases.ll
+++ /dev/null
@@ -1,144 +0,0 @@
-; RUN: opt -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -disable-basicaa -bb-vectorize -S < %s | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-%class.QBezier.15 = type { double, double, double, double, double, double, double, double }
-
-; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
-
-; Function Attrs: uwtable
-declare fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval nocapture readonly align 8) #1
-
-; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
-
-; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
-
-define void @main_arrayctor.cont([10 x %class.QBezier.15]* %beziers, %class.QBezier.15* %agg.tmp.i, %class.QBezier.15* %agg.tmp55.i, %class.QBezier.15* %agg.tmp56.i) {
-newFuncRoot:
- br label %arrayctor.cont
-
-arrayctor.cont.ret.exitStub: ; preds = %arrayctor.cont
- ret void
-
-; CHECK-LABEL: @main_arrayctor.cont
-; CHECK: <2 x double>
-; CHECK: @_ZL12printQBezier7QBezier
-; CHECK: store double %mul8.i, double* %x3.i, align 16
-; CHECK: load double, double* %x3.i, align 16
-; CHECK: ret
-
-arrayctor.cont: ; preds = %newFuncRoot
- %ref.tmp.sroa.0.0.idx = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
- store double 1.000000e+01, double* %ref.tmp.sroa.0.0.idx, align 16
- %ref.tmp.sroa.2.0.idx1 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
- store double 2.000000e+01, double* %ref.tmp.sroa.2.0.idx1, align 8
- %ref.tmp.sroa.3.0.idx2 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
- store double 3.000000e+01, double* %ref.tmp.sroa.3.0.idx2, align 16
- %ref.tmp.sroa.4.0.idx3 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
- store double 4.000000e+01, double* %ref.tmp.sroa.4.0.idx3, align 8
- %ref.tmp.sroa.5.0.idx4 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
- store double 5.000000e+01, double* %ref.tmp.sroa.5.0.idx4, align 16
- %ref.tmp.sroa.6.0.idx5 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
- store double 6.000000e+01, double* %ref.tmp.sroa.6.0.idx5, align 8
- %ref.tmp.sroa.7.0.idx6 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
- store double 7.000000e+01, double* %ref.tmp.sroa.7.0.idx6, align 16
- %ref.tmp.sroa.8.0.idx7 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
- store double 8.000000e+01, double* %ref.tmp.sroa.8.0.idx7, align 8
- %add.ptr = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1
- %v0 = bitcast %class.QBezier.15* %agg.tmp.i to i8*
- call void @llvm.lifetime.start(i64 64, i8* %v0)
- %v1 = bitcast %class.QBezier.15* %agg.tmp55.i to i8*
- call void @llvm.lifetime.start(i64 64, i8* %v1)
- %v2 = bitcast %class.QBezier.15* %agg.tmp56.i to i8*
- call void @llvm.lifetime.start(i64 64, i8* %v2)
- %v3 = bitcast [10 x %class.QBezier.15]* %beziers to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v0, i8* %v3, i64 64, i32 8, i1 false)
- call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp.i)
- %x2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
- %v4 = load double, double* %x2.i, align 16
- %x3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
- %v5 = load double, double* %x3.i, align 16
- %add.i = fadd double %v4, %v5
- %mul.i = fmul double 5.000000e-01, %add.i
- %x1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
- %v6 = load double, double* %x1.i, align 16
- %add3.i = fadd double %v4, %v6
- %mul4.i = fmul double 5.000000e-01, %add3.i
- %x25.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 2
- store double %mul4.i, double* %x25.i, align 16
- %v7 = load double, double* %x3.i, align 16
- %x4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
- %v8 = load double, double* %x4.i, align 16
- %add7.i = fadd double %v7, %v8
- %mul8.i = fmul double 5.000000e-01, %add7.i
- store double %mul8.i, double* %x3.i, align 16
- %v9 = load double, double* %x1.i, align 16
- %x111.i = getelementptr inbounds %class.QBezier.15, %class.QBezier.15* %add.ptr, i64 0, i32 0
- store double %v9, double* %x111.i, align 16
- %v10 = load double, double* %x25.i, align 16
- %add15.i = fadd double %mul.i, %v10
- %mul16.i = fmul double 5.000000e-01, %add15.i
- %x317.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 4
- store double %mul16.i, double* %x317.i, align 16
- %v11 = load double, double* %x3.i, align 16
- %add19.i = fadd double %mul.i, %v11
- %mul20.i = fmul double 5.000000e-01, %add19.i
- store double %mul20.i, double* %x2.i, align 16
- %v12 = load double, double* %x317.i, align 16
- %add24.i = fadd double %v12, %mul20.i
- %mul25.i = fmul double 5.000000e-01, %add24.i
- store double %mul25.i, double* %x1.i, align 16
- %x427.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 6
- store double %mul25.i, double* %x427.i, align 16
- %y2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
- %v13 = load double, double* %y2.i, align 8
- %y3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
- %v14 = load double, double* %y3.i, align 8
- %add28.i = fadd double %v13, %v14
- %div.i = fmul double 5.000000e-01, %add28.i
- %y1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
- %v15 = load double, double* %y1.i, align 8
- %add30.i = fadd double %v13, %v15
- %mul31.i = fmul double 5.000000e-01, %add30.i
- %y232.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 3
- store double %mul31.i, double* %y232.i, align 8
- %v16 = load double, double* %y3.i, align 8
- %y4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
- %v17 = load double, double* %y4.i, align 8
- %add34.i = fadd double %v16, %v17
- %mul35.i = fmul double 5.000000e-01, %add34.i
- store double %mul35.i, double* %y3.i, align 8
- %v18 = load double, double* %y1.i, align 8
- %y138.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 1
- store double %v18, double* %y138.i, align 8
- %v19 = load double, double* %y232.i, align 8
- %add42.i = fadd double %div.i, %v19
- %mul43.i = fmul double 5.000000e-01, %add42.i
- %y344.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 5
- store double %mul43.i, double* %y344.i, align 8
- %v20 = load double, double* %y3.i, align 8
- %add46.i = fadd double %div.i, %v20
- %mul47.i = fmul double 5.000000e-01, %add46.i
- store double %mul47.i, double* %y2.i, align 8
- %v21 = load double, double* %y344.i, align 8
- %add51.i = fadd double %v21, %mul47.i
- %mul52.i = fmul double 5.000000e-01, %add51.i
- store double %mul52.i, double* %y1.i, align 8
- %y454.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 7
- store double %mul52.i, double* %y454.i, align 8
- %v22 = bitcast %class.QBezier.15* %add.ptr to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v1, i8* %v22, i64 64, i32 8, i1 false)
- call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp55.i)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v2, i8* %v3, i64 64, i32 8, i1 false)
- call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp56.i)
- call void @llvm.lifetime.end.p0i8(i64 64, i8* %v0)
- call void @llvm.lifetime.end.p0i8(i64 64, i8* %v1)
- call void @llvm.lifetime.end.p0i8(i64 64, i8* %v2)
- br label %arrayctor.cont.ret.exitStub
-}
-
-attributes #0 = { nounwind }
-attributes #1 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/Transforms/BBVectorize/cycle.ll b/test/Transforms/BBVectorize/cycle.ll
deleted file mode 100644
index 6bfa625ea5f0d..0000000000000
--- a/test/Transforms/BBVectorize/cycle.ll
+++ /dev/null
@@ -1,112 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-
-; This test checks the non-trivial pairing-induced cycle avoidance. Without
-; this cycle avoidance, the algorithm would select the pairs:
-; %div77 = fdiv double %sub74, %mul76.v.r1 <-> %div125 = fdiv double %mul121, %mul76.v.r2 (div125 depends on mul117)
-; %add84 = fadd double %sub83, 2.000000e+00 <-> %add127 = fadd double %mul126, 1.000000e+00 (add127 depends on div77)
-; %mul95 = fmul double %sub45.v.r1, %sub36.v.r1 <-> %mul88 = fmul double %sub36.v.r1, %sub87 (mul88 depends on add84)
-; %mul117 = fmul double %sub39.v.r1, %sub116 <-> %mul97 = fmul double %mul96, %sub39.v.r1 (mul97 depends on mul95)
-; and so a dependency cycle would be created.
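-; A sketch of the cycle, derived from the dependencies listed above (the
-; labels P1..P4 are introduced here only for illustration): fusing each
-; candidate pair into one node gives
-;   P1 = {div77, div125}, P2 = {add84, add127},
-;   P3 = {mul95, mul88},  P4 = {mul117, mul97},
-; and, writing X -> Y for "X depends on Y", the listed dependencies become
-; P1 -> P4 (div125 uses mul117), P4 -> P3 (mul97 uses mul95),
-; P3 -> P2 (mul88 uses add84), and P2 -> P1 (add127 uses div77),
-; forming the cycle P1 -> P4 -> P3 -> P2 -> P1, so no schedule of the
-; fused nodes exists.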
-
-declare double @fabs(double) nounwind readnone
-define void @test1(double %a, double %b, double %c, double %add80, double %mul1, double %mul2.v.r1, double %mul73, double %sub, double %sub65, double %F.0, i32 %n.0, double %Bnm3.0, double %Bnm2.0, double %Bnm1.0, double %Anm3.0, double %Anm2.0, double %Anm1.0) {
-entry:
- br label %go
-go:
- %conv = sitofp i32 %n.0 to double
- %add35 = fadd double %conv, %a
- %sub36 = fadd double %add35, -1.000000e+00
- %add38 = fadd double %conv, %b
- %sub39 = fadd double %add38, -1.000000e+00
- %add41 = fadd double %conv, %c
- %sub42 = fadd double %add41, -1.000000e+00
- %sub45 = fadd double %add35, -2.000000e+00
- %sub48 = fadd double %add38, -2.000000e+00
- %sub51 = fadd double %add41, -2.000000e+00
- %mul52 = shl nsw i32 %n.0, 1
- %sub53 = add nsw i32 %mul52, -1
- %conv54 = sitofp i32 %sub53 to double
- %sub56 = add nsw i32 %mul52, -3
- %conv57 = sitofp i32 %sub56 to double
- %sub59 = add nsw i32 %mul52, -5
- %conv60 = sitofp i32 %sub59 to double
- %mul61 = mul nsw i32 %n.0, %n.0
- %conv62 = sitofp i32 %mul61 to double
- %mul63 = fmul double %conv62, 3.000000e+00
- %mul67 = fmul double %sub65, %conv
- %add68 = fadd double %mul63, %mul67
- %add69 = fadd double %add68, 2.000000e+00
- %sub71 = fsub double %add69, %mul2.v.r1
- %sub74 = fsub double %sub71, %mul73
- %mul75 = fmul double %conv57, 2.000000e+00
- %mul76 = fmul double %mul75, %sub42
- %div77 = fdiv double %sub74, %mul76
- %mul82 = fmul double %add80, %conv
- %sub83 = fsub double %mul63, %mul82
- %add84 = fadd double %sub83, 2.000000e+00
- %sub86 = fsub double %add84, %mul2.v.r1
- %sub87 = fsub double -0.000000e+00, %sub86
- %mul88 = fmul double %sub36, %sub87
- %mul89 = fmul double %mul88, %sub39
- %mul90 = fmul double %conv54, 4.000000e+00
- %mul91 = fmul double %mul90, %conv57
- %mul92 = fmul double %mul91, %sub51
- %mul93 = fmul double %mul92, %sub42
- %div94 = fdiv double %mul89, %mul93
- %mul95 = fmul double %sub45, %sub36
- %mul96 = fmul double %mul95, %sub48
- %mul97 = fmul double %mul96, %sub39
- %sub99 = fsub double %conv, %a
- %sub100 = fadd double %sub99, -2.000000e+00
- %mul101 = fmul double %mul97, %sub100
- %sub103 = fsub double %conv, %b
- %sub104 = fadd double %sub103, -2.000000e+00
- %mul105 = fmul double %mul101, %sub104
- %mul106 = fmul double %conv57, 8.000000e+00
- %mul107 = fmul double %mul106, %conv57
- %mul108 = fmul double %mul107, %conv60
- %sub111 = fadd double %add41, -3.000000e+00
- %mul112 = fmul double %mul108, %sub111
- %mul113 = fmul double %mul112, %sub51
- %mul114 = fmul double %mul113, %sub42
- %div115 = fdiv double %mul105, %mul114
- %sub116 = fsub double -0.000000e+00, %sub36
- %mul117 = fmul double %sub39, %sub116
- %sub119 = fsub double %conv, %c
- %sub120 = fadd double %sub119, -1.000000e+00
- %mul121 = fmul double %mul117, %sub120
- %mul123 = fmul double %mul75, %sub51
- %mul124 = fmul double %mul123, %sub42
- %div125 = fdiv double %mul121, %mul124
- %mul126 = fmul double %div77, %sub
- %add127 = fadd double %mul126, 1.000000e+00
- %mul128 = fmul double %add127, %Anm1.0
- %mul129 = fmul double %div94, %sub
- %add130 = fadd double %div125, %mul129
- %mul131 = fmul double %add130, %sub
- %mul132 = fmul double %mul131, %Anm2.0
- %add133 = fadd double %mul128, %mul132
- %mul134 = fmul double %div115, %mul1
- %mul135 = fmul double %mul134, %Anm3.0
- %add136 = fadd double %add133, %mul135
- %mul139 = fmul double %add127, %Bnm1.0
- %mul143 = fmul double %mul131, %Bnm2.0
- %add144 = fadd double %mul139, %mul143
- %mul146 = fmul double %mul134, %Bnm3.0
- %add147 = fadd double %add144, %mul146
- %div148 = fdiv double %add136, %add147
- %sub149 = fsub double %F.0, %div148
- %div150 = fdiv double %sub149, %F.0
- %call = tail call double @fabs(double %div150) nounwind readnone
- %cmp = fcmp olt double %call, 0x3CB0000000000000
- %cmp152 = icmp sgt i32 %n.0, 20000
- %or.cond = or i1 %cmp, %cmp152
- br i1 %or.cond, label %done, label %go
-done:
- ret void
-; CHECK-LABEL: @test1(
-; CHECK: go:
-; CHECK: %conv.v.i0.1 = insertelement <2 x i32> undef, i32 %n.0, i32 0
-; FIXME: When tree pruning is deterministic, include the entire output.
-}
diff --git a/test/Transforms/BBVectorize/func-alias.ll b/test/Transforms/BBVectorize/func-alias.ll
deleted file mode 100644
index ab72ec0e19912..0000000000000
--- a/test/Transforms/BBVectorize/func-alias.ll
+++ /dev/null
@@ -1,244 +0,0 @@
-target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -basicaa -bb-vectorize -bb-vectorize-req-chain-depth=2 -instcombine -gvn -S | FileCheck %s
-; The required chain depth is set to 2 so that some vectorization happens;
-; the CHECK lines verify that the order of the function calls is unchanged.
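-; A minimal sketch of how the ordering is enforced (restating the CHECK
-; lines below, not adding new expected output): FileCheck matches CHECK
-; directives in order, so a sequence such as
-;   CHECK: @_gfortran_transfer_integer_write
-;   CHECK: @_gfortran_transfer_real_write
-; only passes if the integer write still precedes the real write after
-; vectorization.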
-
-%struct.descriptor_dimension = type { i64, i64, i64 }
-%struct.__st_parameter_common = type { i32, i32, i8*, i32, i32, i8*, i32* }
-%struct.__st_parameter_dt = type { %struct.__st_parameter_common, i64, i64*, i64*, i8*, i8*, i32, i32, i8*, i8*, i32, i32, i8*, [256 x i8], i32*, i64, i8*, i32, i32, i8*, i8*, i32, i32, i8*, i8*, i32, i32, i8*, i8*, i32, [4 x i8] }
-%"struct.array4_real(kind=4)" = type { i8*, i64, i64, [4 x %struct.descriptor_dimension] }
-%"struct.array4_integer(kind=4).73" = type { i8*, i64, i64, [4 x %struct.descriptor_dimension] }
-%struct.array4_unknown = type { i8*, i64, i64, [4 x %struct.descriptor_dimension] }
-
-@.cst4 = external unnamed_addr constant [11 x i8], align 8
-@.cst823 = external unnamed_addr constant [214 x i8], align 64
-@j.4580 = external global i32
-@j1.4581 = external global i32
-@nty1.4590 = external global [2 x i8]
-@nty2.4591 = external global [2 x i8]
-@xr1.4592 = external global float
-@xr2.4593 = external global float
-@yr1.4594 = external global float
-@yr2.4595 = external global float
-
-@__main1_MOD_iave = external unnamed_addr global i32
-@__main1_MOD_igrp = external global i32
-@__main1_MOD_iounit = external global i32
-@__main1_MOD_ityp = external global i32
-@__main1_MOD_mclmsg = external unnamed_addr global %struct.array4_unknown, align 32
-@__main1_MOD_mxdate = external unnamed_addr global %"struct.array4_integer(kind=4).73", align 32
-@__main1_MOD_rmxval = external unnamed_addr global %"struct.array4_real(kind=4)", align 32
-
-declare void @_gfortran_st_write(%struct.__st_parameter_dt*)
-declare void @_gfortran_st_write_done(%struct.__st_parameter_dt*)
-declare void @_gfortran_transfer_character_write(%struct.__st_parameter_dt*, i8*, i32)
-declare void @_gfortran_transfer_integer_write(%struct.__st_parameter_dt*, i8*, i32)
-declare void @_gfortran_transfer_real_write(%struct.__st_parameter_dt*, i8*, i32)
-
-define i1 @"prtmax__<bb 3>_<bb 34>"(%struct.__st_parameter_dt* %memtmp3, i32 %D.4627_188.reload) nounwind {
-; CHECK: prtmax__
-newFuncRoot:
- br label %"<bb 34>"
-
-codeRepl80.exitStub: ; preds = %"<bb 34>"
- ret i1 true
-
-"<bb 34>.<bb 25>_crit_edge.exitStub": ; preds = %"<bb 34>"
- ret i1 false
-
-"<bb 34>": ; preds = %newFuncRoot
- %tmp128 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp129 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp128, i32 0, i32 2
- store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.cst4, i64 0, i64 0), i8** %tmp129, align 8
- %tmp130 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp131 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp130, i32 0, i32 3
- store i32 31495, i32* %tmp131, align 4
- %tmp132 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 5
- store i8* getelementptr inbounds ([214 x i8], [214 x i8]* @.cst823, i64 0, i64 0), i8** %tmp132, align 8
- %tmp133 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 6
- store i32 214, i32* %tmp133, align 4
- %tmp134 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp135 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp134, i32 0, i32 0
- store i32 4096, i32* %tmp135, align 4
- %iounit.8748_288 = load i32, i32* @__main1_MOD_iounit, align 4
- %tmp136 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp137 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp136, i32 0, i32 1
- store i32 %iounit.8748_288, i32* %tmp137, align 4
- call void @_gfortran_st_write(%struct.__st_parameter_dt* %memtmp3) nounwind
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* @j.4580, i32 4) nounwind
-; CHECK: @_gfortran_transfer_integer_write
- %D.75807_289 = load i8*, i8** getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
- %j.8758_290 = load i32, i32* @j.4580, align 4
- %D.75760_291 = sext i32 %j.8758_290 to i64
- %iave.8736_292 = load i32, i32* @__main1_MOD_iave, align 4
- %D.75620_293 = sext i32 %iave.8736_292 to i64
- %D.75808_294 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
- %D.75809_295 = mul nsw i64 %D.75620_293, %D.75808_294
- %igrp.8737_296 = load i32, i32* @__main1_MOD_igrp, align 4
- %D.75635_297 = sext i32 %igrp.8737_296 to i64
- %D.75810_298 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
- %D.75811_299 = mul nsw i64 %D.75635_297, %D.75810_298
- %D.75812_300 = add nsw i64 %D.75809_295, %D.75811_299
- %D.75813_301 = add nsw i64 %D.75760_291, %D.75812_300
- %ityp.8750_302 = load i32, i32* @__main1_MOD_ityp, align 4
- %D.75704_303 = sext i32 %ityp.8750_302 to i64
- %D.75814_304 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
- %D.75815_305 = mul nsw i64 %D.75704_303, %D.75814_304
- %D.75816_306 = add nsw i64 %D.75813_301, %D.75815_305
- %D.75817_307 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), align 8
- %D.75818_308 = add nsw i64 %D.75816_306, %D.75817_307
- %tmp138 = bitcast i8* %D.75807_289 to [0 x float]*
- %tmp139 = bitcast [0 x float]* %tmp138 to float*
- %D.75819_309 = getelementptr inbounds float, float* %tmp139, i64 %D.75818_308
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* %D.75819_309, i32 4) nounwind
-; CHECK: @_gfortran_transfer_real_write
- %D.75820_310 = load i8*, i8** getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
- %j.8758_311 = load i32, i32* @j.4580, align 4
- %D.75760_312 = sext i32 %j.8758_311 to i64
- %iave.8736_313 = load i32, i32* @__main1_MOD_iave, align 4
- %D.75620_314 = sext i32 %iave.8736_313 to i64
- %D.75821_315 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 2, i32 0), align 8
- %D.75822_316 = mul nsw i64 %D.75620_314, %D.75821_315
- %igrp.8737_317 = load i32, i32* @__main1_MOD_igrp, align 4
- %D.75635_318 = sext i32 %igrp.8737_317 to i64
- %D.75823_319 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 1, i32 0), align 8
- %D.75824_320 = mul nsw i64 %D.75635_318, %D.75823_319
- %D.75825_321 = add nsw i64 %D.75822_316, %D.75824_320
- %D.75826_322 = add nsw i64 %D.75760_312, %D.75825_321
- %ityp.8750_323 = load i32, i32* @__main1_MOD_ityp, align 4
- %D.75704_324 = sext i32 %ityp.8750_323 to i64
- %D.75827_325 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 3, i32 0), align 8
- %D.75828_326 = mul nsw i64 %D.75704_324, %D.75827_325
- %D.75829_327 = add nsw i64 %D.75826_322, %D.75828_326
- %D.75830_328 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 1), align 8
- %D.75831_329 = add nsw i64 %D.75829_327, %D.75830_328
- %tmp140 = bitcast i8* %D.75820_310 to [0 x [1 x i8]]*
- %tmp141 = bitcast [0 x [1 x i8]]* %tmp140 to [1 x i8]*
- %D.75832_330 = getelementptr inbounds [1 x i8], [1 x i8]* %tmp141, i64 %D.75831_329
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [1 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [1 x i8]* %D.75832_330, i32 1) nounwind
-; CHECK: @_gfortran_transfer_character_write
- %D.75833_331 = load i8*, i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
- %j.8758_332 = load i32, i32* @j.4580, align 4
- %D.75760_333 = sext i32 %j.8758_332 to i64
- %iave.8736_334 = load i32, i32* @__main1_MOD_iave, align 4
- %D.75620_335 = sext i32 %iave.8736_334 to i64
- %D.75834_336 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 2, i32 0), align 8
- %D.75835_337 = mul nsw i64 %D.75620_335, %D.75834_336
- %igrp.8737_338 = load i32, i32* @__main1_MOD_igrp, align 4
- %D.75635_339 = sext i32 %igrp.8737_338 to i64
- %D.75836_340 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 1, i32 0), align 8
- %D.75837_341 = mul nsw i64 %D.75635_339, %D.75836_340
- %D.75838_342 = add nsw i64 %D.75835_337, %D.75837_341
- %D.75839_343 = add nsw i64 %D.75760_333, %D.75838_342
- %ityp.8750_344 = load i32, i32* @__main1_MOD_ityp, align 4
- %D.75704_345 = sext i32 %ityp.8750_344 to i64
- %D.75840_346 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 3, i32 0), align 8
- %D.75841_347 = mul nsw i64 %D.75704_345, %D.75840_346
- %D.75842_348 = add nsw i64 %D.75839_343, %D.75841_347
- %D.75843_349 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 1), align 8
- %D.75844_350 = add nsw i64 %D.75842_348, %D.75843_349
- %tmp142 = bitcast i8* %D.75833_331 to [0 x i32]*
- %tmp143 = bitcast [0 x i32]* %tmp142 to i32*
- %D.75845_351 = getelementptr inbounds i32, i32* %tmp143, i64 %D.75844_350
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* %D.75845_351, i32 4) nounwind
-; CHECK: @_gfortran_transfer_integer_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* @xr1.4592, i32 4) nounwind
-; CHECK: @_gfortran_transfer_real_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* @yr1.4594, i32 4) nounwind
-; CHECK: @_gfortran_transfer_real_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [2 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [2 x i8]* @nty1.4590, i32 2) nounwind
-; CHECK: @_gfortran_transfer_character_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* @j1.4581, i32 4) nounwind
-; CHECK: @_gfortran_transfer_integer_write
- %D.75807_352 = load i8*, i8** getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
- %j1.8760_353 = load i32, i32* @j1.4581, align 4
- %D.75773_354 = sext i32 %j1.8760_353 to i64
- %iave.8736_355 = load i32, i32* @__main1_MOD_iave, align 4
- %D.75620_356 = sext i32 %iave.8736_355 to i64
- %D.75808_357 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
- %D.75809_358 = mul nsw i64 %D.75620_356, %D.75808_357
- %igrp.8737_359 = load i32, i32* @__main1_MOD_igrp, align 4
- %D.75635_360 = sext i32 %igrp.8737_359 to i64
- %D.75810_361 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
- %D.75811_362 = mul nsw i64 %D.75635_360, %D.75810_361
- %D.75812_363 = add nsw i64 %D.75809_358, %D.75811_362
- %D.75846_364 = add nsw i64 %D.75773_354, %D.75812_363
- %ityp.8750_365 = load i32, i32* @__main1_MOD_ityp, align 4
- %D.75704_366 = sext i32 %ityp.8750_365 to i64
- %D.75814_367 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
- %D.75815_368 = mul nsw i64 %D.75704_366, %D.75814_367
- %D.75847_369 = add nsw i64 %D.75846_364, %D.75815_368
- %D.75817_370 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)", %"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), align 8
- %D.75848_371 = add nsw i64 %D.75847_369, %D.75817_370
- %tmp144 = bitcast i8* %D.75807_352 to [0 x float]*
- %tmp145 = bitcast [0 x float]* %tmp144 to float*
- %D.75849_372 = getelementptr inbounds float, float* %tmp145, i64 %D.75848_371
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* %D.75849_372, i32 4) nounwind
-; CHECK: @_gfortran_transfer_real_write
- %D.75820_373 = load i8*, i8** getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
- %j1.8760_374 = load i32, i32* @j1.4581, align 4
- %D.75773_375 = sext i32 %j1.8760_374 to i64
- %iave.8736_376 = load i32, i32* @__main1_MOD_iave, align 4
- %D.75620_377 = sext i32 %iave.8736_376 to i64
- %D.75821_378 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 2, i32 0), align 8
- %D.75822_379 = mul nsw i64 %D.75620_377, %D.75821_378
- %igrp.8737_380 = load i32, i32* @__main1_MOD_igrp, align 4
- %D.75635_381 = sext i32 %igrp.8737_380 to i64
- %D.75823_382 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 1, i32 0), align 8
- %D.75824_383 = mul nsw i64 %D.75635_381, %D.75823_382
- %D.75825_384 = add nsw i64 %D.75822_379, %D.75824_383
- %D.75850_385 = add nsw i64 %D.75773_375, %D.75825_384
- %ityp.8750_386 = load i32, i32* @__main1_MOD_ityp, align 4
- %D.75704_387 = sext i32 %ityp.8750_386 to i64
- %D.75827_388 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 3, i32 0), align 8
- %D.75828_389 = mul nsw i64 %D.75704_387, %D.75827_388
- %D.75851_390 = add nsw i64 %D.75850_385, %D.75828_389
- %D.75830_391 = load i64, i64* getelementptr inbounds (%struct.array4_unknown, %struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 1), align 8
- %D.75852_392 = add nsw i64 %D.75851_390, %D.75830_391
- %tmp146 = bitcast i8* %D.75820_373 to [0 x [1 x i8]]*
- %tmp147 = bitcast [0 x [1 x i8]]* %tmp146 to [1 x i8]*
- %D.75853_393 = getelementptr inbounds [1 x i8], [1 x i8]* %tmp147, i64 %D.75852_392
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [1 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [1 x i8]* %D.75853_393, i32 1) nounwind
-; CHECK: @_gfortran_transfer_character_write
- %D.75833_394 = load i8*, i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
- %j1.8760_395 = load i32, i32* @j1.4581, align 4
- %D.75773_396 = sext i32 %j1.8760_395 to i64
- %iave.8736_397 = load i32, i32* @__main1_MOD_iave, align 4
- %D.75620_398 = sext i32 %iave.8736_397 to i64
- %D.75834_399 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 2, i32 0), align 8
- %D.75835_400 = mul nsw i64 %D.75620_398, %D.75834_399
- %igrp.8737_401 = load i32, i32* @__main1_MOD_igrp, align 4
- %D.75635_402 = sext i32 %igrp.8737_401 to i64
- %D.75836_403 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 1, i32 0), align 8
- %D.75837_404 = mul nsw i64 %D.75635_402, %D.75836_403
- %D.75838_405 = add nsw i64 %D.75835_400, %D.75837_404
- %D.75854_406 = add nsw i64 %D.75773_396, %D.75838_405
- %ityp.8750_407 = load i32, i32* @__main1_MOD_ityp, align 4
- %D.75704_408 = sext i32 %ityp.8750_407 to i64
- %D.75840_409 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 3, i32 0), align 8
- %D.75841_410 = mul nsw i64 %D.75704_408, %D.75840_409
- %D.75855_411 = add nsw i64 %D.75854_406, %D.75841_410
- %D.75843_412 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73", %"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 1), align 8
- %D.75856_413 = add nsw i64 %D.75855_411, %D.75843_412
- %tmp148 = bitcast i8* %D.75833_394 to [0 x i32]*
- %tmp149 = bitcast [0 x i32]* %tmp148 to i32*
- %D.75857_414 = getelementptr inbounds i32, i32* %tmp149, i64 %D.75856_413
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* %D.75857_414, i32 4) nounwind
-; CHECK: @_gfortran_transfer_integer_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* @xr2.4593, i32 4) nounwind
-; CHECK: @_gfortran_transfer_real_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* @yr2.4595, i32 4) nounwind
-; CHECK: @_gfortran_transfer_real_write
- call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [2 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [2 x i8]* @nty2.4591, i32 2) nounwind
-; CHECK: @_gfortran_transfer_character_write
- call void @_gfortran_st_write_done(%struct.__st_parameter_dt* %memtmp3) nounwind
-; CHECK: @_gfortran_st_write_done
- %j.8758_415 = load i32, i32* @j.4580, align 4
- %D.4634_416 = icmp eq i32 %j.8758_415, %D.4627_188.reload
- %j.8758_417 = load i32, i32* @j.4580, align 4
- %j.8770_418 = add nsw i32 %j.8758_417, 1
- store i32 %j.8770_418, i32* @j.4580, align 4
- %tmp150 = icmp ne i1 %D.4634_416, false
- br i1 %tmp150, label %codeRepl80.exitStub, label %"<bb 34>.<bb 25>_crit_edge.exitStub"
-}
-
diff --git a/test/Transforms/BBVectorize/ld1.ll b/test/Transforms/BBVectorize/ld1.ll
deleted file mode 100644
index 368c38aa5ce7c..0000000000000
--- a/test/Transforms/BBVectorize/ld1.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-
-define double @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
-entry:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1
- %i2 = load double, double* %c, align 8
- %add = fadd double %mul, %i2
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4
- %arrayidx6 = getelementptr inbounds double, double* %c, i64 1
- %i5 = load double, double* %arrayidx6, align 8
- %add7 = fadd double %mul5, %i5
- %mul9 = fmul double %add, %i1
- %add11 = fadd double %mul9, %i2
- %mul13 = fmul double %add7, %i4
- %add15 = fadd double %mul13, %i5
- %mul16 = fmul double %add11, %add15
- ret double %mul16
-; CHECK-LABEL: @test1(
-; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
-; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i2.v.i0 = bitcast double* %c to <2 x double>*
-; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
-; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %i2 = load <2 x double>, <2 x double>* %i2.v.i0, align 8
-; CHECK: %add = fadd <2 x double> %mul, %i2
-; CHECK: %mul9 = fmul <2 x double> %add, %i1
-; CHECK: %add11 = fadd <2 x double> %mul9, %i2
-; CHECK: %add11.v.r1 = extractelement <2 x double> %add11, i32 0
-; CHECK: %add11.v.r2 = extractelement <2 x double> %add11, i32 1
-; CHECK: %mul16 = fmul double %add11.v.r1, %add11.v.r2
-; CHECK: ret double %mul16
-}
-
diff --git a/test/Transforms/BBVectorize/lit.local.cfg b/test/Transforms/BBVectorize/lit.local.cfg
deleted file mode 100644
index e71f3cc4c41e7..0000000000000
--- a/test/Transforms/BBVectorize/lit.local.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-if 'X86' not in config.root.targets:
- config.unsupported = True
-
diff --git a/test/Transforms/BBVectorize/loop1.ll b/test/Transforms/BBVectorize/loop1.ll
deleted file mode 100644
index 8ff5953cf46a1..0000000000000
--- a/test/Transforms/BBVectorize/loop1.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -dont-improve-non-negative-phi-bits=false -basicaa -loop-unroll -unroll-threshold=45 -unroll-partial-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
-; The second check covers the use of alias analysis (with loop unrolling).
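-; A sketch of why unrolling matters here, inferred from the CHECK-UNRL
-; lines below: unrolling by a factor of two places iterations i and i+1 in
-; the same basic block, so the loads from %in1 and %in2 at consecutive
-; indices become pairable and are rewritten as single <2 x double> loads,
-; e.g.
-;   %0 = bitcast double* %arrayidx to <2 x double>*
-;   %2 = load <2 x double>, <2 x double>* %0, align 8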
-
-define void @test1(double* noalias %out, double* noalias %in1, double* noalias %in2) nounwind uwtable {
-entry:
- br label %for.body
-; CHECK-LABEL: @test1(
-; CHECK-UNRL-LABEL: @test1(
-
-for.body: ; preds = %for.body, %entry
- %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
- %0 = load double, double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
- %1 = load double, double* %arrayidx2, align 8
- %mul = fmul double %0, %0
- %mul3 = fmul double %0, %1
- %add = fadd double %mul, %mul3
- %add4 = fadd double %1, %1
- %add5 = fadd double %add4, %0
- %mul6 = fmul double %0, %add5
- %add7 = fadd double %add, %mul6
- %mul8 = fmul double %1, %1
- %add9 = fadd double %0, %0
- %add10 = fadd double %add9, %0
- %mul11 = fmul double %mul8, %add10
- %add12 = fadd double %add7, %mul11
- %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
- store double %add12, double* %arrayidx14, align 8
- %indvars.iv.next = add i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, 10
- br i1 %exitcond, label %for.end, label %for.body
-; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-; CHECK: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
-; CHECK: %0 = load double, double* %arrayidx, align 8
-; CHECK: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
-; CHECK: %1 = load double, double* %arrayidx2, align 8
-; CHECK: %mul = fmul double %0, %0
-; CHECK: %mul3 = fmul double %0, %1
-; CHECK: %add = fadd double %mul, %mul3
-; CHECK: %mul8 = fmul double %1, %1
-; CHECK: %add4.v.i1.1 = insertelement <2 x double> undef, double %1, i32 0
-; CHECK: %add4.v.i1.2 = insertelement <2 x double> %add4.v.i1.1, double %0, i32 1
-; CHECK: %add4 = fadd <2 x double> %add4.v.i1.2, %add4.v.i1.2
-; CHECK: %2 = insertelement <2 x double> undef, double %0, i32 0
-; CHECK: %add5.v.i1.2 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
-; CHECK: %add5 = fadd <2 x double> %add4, %add5.v.i1.2
-; CHECK: %mul6.v.i0.2 = insertelement <2 x double> %2, double %mul8, i32 1
-; CHECK: %mul6 = fmul <2 x double> %mul6.v.i0.2, %add5
-; CHECK: %mul6.v.r1 = extractelement <2 x double> %mul6, i32 0
-; CHECK: %mul6.v.r2 = extractelement <2 x double> %mul6, i32 1
-; CHECK: %add7 = fadd double %add, %mul6.v.r1
-; CHECK: %add12 = fadd double %add7, %mul6.v.r2
-; CHECK: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
-; CHECK: store double %add12, double* %arrayidx14, align 8
-; CHECK: %indvars.iv.next = add i64 %indvars.iv, 1
-; CHECK: %lftr.wideiv = trunc i64 %indvars.iv.next to i32
-; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, 10
-; CHECK: br i1 %exitcond, label %for.end, label %for.body
-; CHECK-UNRL: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next.1, %for.body ]
-; CHECK-UNRL: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
-; CHECK-UNRL: %0 = bitcast double* %arrayidx to <2 x double>*
-; CHECK-UNRL: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
-; CHECK-UNRL: %1 = bitcast double* %arrayidx2 to <2 x double>*
-; CHECK-UNRL: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
-; CHECK-UNRL: %2 = load <2 x double>, <2 x double>* %0, align 8
-; CHECK-UNRL: %3 = load <2 x double>, <2 x double>* %1, align 8
-; CHECK-UNRL: %mul = fmul <2 x double> %2, %2
-; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
-; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
-; CHECK-UNRL: %add4 = fadd <2 x double> %3, %3
-; CHECK-UNRL: %add5 = fadd <2 x double> %add4, %2
-; CHECK-UNRL: %mul6 = fmul <2 x double> %2, %add5
-; CHECK-UNRL: %add7 = fadd <2 x double> %add, %mul6
-; CHECK-UNRL: %mul8 = fmul <2 x double> %3, %3
-; CHECK-UNRL: %add9 = fadd <2 x double> %2, %2
-; CHECK-UNRL: %add10 = fadd <2 x double> %add9, %2
-; CHECK-UNRL: %mul11 = fmul <2 x double> %mul8, %add10
-; CHECK-UNRL: %add12 = fadd <2 x double> %add7, %mul11
-; CHECK-UNRL: %4 = bitcast double* %arrayidx14 to <2 x double>*
-; CHECK-UNRL: store <2 x double> %add12, <2 x double>* %4, align 8
-; CHECK-UNRL: %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
-; CHECK-UNRL: %lftr.wideiv.1 = trunc i64 %indvars.iv.next.1 to i32
-; CHECK-UNRL: %exitcond.1 = icmp eq i32 %lftr.wideiv.1, 10
-; CHECK-UNRL: br i1 %exitcond.1, label %for.end, label %for.body
-
-for.end: ; preds = %for.body
- ret void
-}
diff --git a/test/Transforms/BBVectorize/mem-op-depth.ll b/test/Transforms/BBVectorize/mem-op-depth.ll
deleted file mode 100644
index 732043b7f8eca..0000000000000
--- a/test/Transforms/BBVectorize/mem-op-depth.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=6 -instcombine -gvn -S | FileCheck %s
-
-@A = common global [1024 x float] zeroinitializer, align 16
-@B = common global [1024 x float] zeroinitializer, align 16
-
-define i32 @test1() nounwind {
-; CHECK-LABEL: @test1(
- %V1 = load float, float* getelementptr inbounds ([1024 x float], [1024 x float]* @A, i64 0, i64 0), align 16
- %V2 = load float, float* getelementptr inbounds ([1024 x float], [1024 x float]* @A, i64 0, i64 1), align 4
- %V3 = load float, float* getelementptr inbounds ([1024 x float], [1024 x float]* @A, i64 0, i64 2), align 8
- %V4 = load float, float* getelementptr inbounds ([1024 x float], [1024 x float]* @A, i64 0, i64 3), align 4
-; CHECK: %V1 = load <4 x float>, <4 x float>* bitcast ([1024 x float]* @A to <4 x float>*), align 16
- store float %V1, float* getelementptr inbounds ([1024 x float], [1024 x float]* @B, i64 0, i64 0), align 16
- store float %V2, float* getelementptr inbounds ([1024 x float], [1024 x float]* @B, i64 0, i64 1), align 4
- store float %V3, float* getelementptr inbounds ([1024 x float], [1024 x float]* @B, i64 0, i64 2), align 8
- store float %V4, float* getelementptr inbounds ([1024 x float], [1024 x float]* @B, i64 0, i64 3), align 4
-; CHECK-NEXT: store <4 x float> %V1, <4 x float>* bitcast ([1024 x float]* @B to <4 x float>*), align 16
- ret i32 0
-; CHECK-NEXT: ret i32 0
-}
diff --git a/test/Transforms/BBVectorize/metadata.ll b/test/Transforms/BBVectorize/metadata.ll
deleted file mode 100644
index f5580a8886160..0000000000000
--- a/test/Transforms/BBVectorize/metadata.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -S | FileCheck %s
-
-; Simple 3-pair chain with loads and stores (with fpmath)
-define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
-entry:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1, !fpmath !2
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4, !fpmath !3
- store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
- store double %mul5, double* %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test1(
-; CHECK: !fpmath
-; CHECK: ret void
-}
-
-; Simple 3-pair chain with loads and stores (ints with range)
-define void @test2(i64* %a, i64* %b, i64* %c) nounwind uwtable readonly {
-entry:
- %i0 = load i64, i64* %a, align 8, !range !0
- %i1 = load i64, i64* %b, align 8
- %mul = mul i64 %i0, %i1
- %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
- %i3 = load i64, i64* %arrayidx3, align 8, !range !1
- %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
- %i4 = load i64, i64* %arrayidx4, align 8
- %mul5 = mul i64 %i3, %i4
- store i64 %mul, i64* %c, align 8
- %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
- store i64 %mul5, i64* %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test2(
-; CHECK-NOT: !range
-; CHECK: ret void
-}
-
-!0 = !{i64 0, i64 2}
-!1 = !{i64 3, i64 5}
-
-!2 = !{ float 5.0 }
-!3 = !{ float 2.5 }
-
diff --git a/test/Transforms/BBVectorize/no-ldstr-conn.ll b/test/Transforms/BBVectorize/no-ldstr-conn.ll
deleted file mode 100644
index a84cd65856022..0000000000000
--- a/test/Transforms/BBVectorize/no-ldstr-conn.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=2 -instcombine -gvn -S | FileCheck %s
-
-; Make sure that instructions (specifically getelementptr) are not paired with
-; loads and stores through the address operand: even after vectorization the
-; address is still a scalar, so forming a vector of pointers there would be bad.
-define i64 @test2(i64 %a) nounwind uwtable readonly {
-entry:
- %a1 = inttoptr i64 %a to i64*
- %a2 = getelementptr i64, i64* %a1, i64 1
- %a3 = getelementptr i64, i64* %a1, i64 2
- %v2 = load i64, i64* %a2, align 8
- %v3 = load i64, i64* %a3, align 8
- %v2a = add i64 %v2, 5
- %v3a = add i64 %v3, 7
- store i64 %v2a, i64* %a2, align 8
- store i64 %v3a, i64* %a3, align 8
- %r = add i64 %v2, %v3
- ret i64 %r
-; CHECK-LABEL: @test2(
-; CHECK-NOT: getelementptr i64, <2 x i64*>
-}
-
diff --git a/test/Transforms/BBVectorize/req-depth.ll b/test/Transforms/BBVectorize/req-depth.ll
deleted file mode 100644
index 2675354183a67..0000000000000
--- a/test/Transforms/BBVectorize/req-depth.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth 3 -bb-vectorize-ignore-target-info -S | FileCheck %s -check-prefix=CHECK-RD3
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth 2 -bb-vectorize-ignore-target-info -S | FileCheck %s -check-prefix=CHECK-RD2
-
-define double @test1(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %R = fmul double %Y1, %Y2
- ret double %R
-; CHECK-RD3-LABEL: @test1(
-; CHECK-RD2-LABEL: @test1(
-; CHECK-RD3-NOT: <2 x double>
-; CHECK-RD2: <2 x double>
-}
-
diff --git a/test/Transforms/BBVectorize/search-limit.ll b/test/Transforms/BBVectorize/search-limit.ll
deleted file mode 100644
index be38d34026039..0000000000000
--- a/test/Transforms/BBVectorize/search-limit.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-search-limit=4 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-SL4
-
-define double @test1(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test1(
-; CHECK-SL4-LABEL: @test1(
-; CHECK-SL4-NOT: <2 x double>
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z1 = fadd double %Y1, %B1
- ; Here we have a dependency chain: with the short search limit, the
- ; vectorizer cannot see past this chain and so never finds the second
- ; half of the pair it would need to vectorize.
- %mul41 = fmul double %Z1, %Y2
- %sub48 = fsub double %Z1, %mul41
- %mul62 = fmul double %Z1, %sub48
- %sub69 = fsub double %Z1, %mul62
- %mul83 = fmul double %Z1, %sub69
- %sub90 = fsub double %Z1, %mul83
- %mul104 = fmul double %Z1, %sub90
- %sub111 = fsub double %Z1, %mul104
- %mul125 = fmul double %Z1, %sub111
- %sub132 = fsub double %Z1, %mul125
- %mul146 = fmul double %Z1, %sub132
- %sub153 = fsub double %Z1, %mul146
- ; end of chain.
- %Z2 = fadd double %Y2, %B2
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
- %R1 = fdiv double %Z1, %Z2
- %R = fmul double %R1, %sub153
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R1 = fdiv double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
diff --git a/test/Transforms/BBVectorize/simple-int.ll b/test/Transforms/BBVectorize/simple-int.ll
deleted file mode 100644
index b7f87fe1db0e2..0000000000000
--- a/test/Transforms/BBVectorize/simple-int.ll
+++ /dev/null
@@ -1,506 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-
-declare double @llvm.fma.f64(double, double, double)
-declare double @llvm.fmuladd.f64(double, double, double)
-declare double @llvm.cos.f64(double)
-declare double @llvm.powi.f64(double, i32)
-declare double @llvm.round.f64(double)
-declare double @llvm.copysign.f64(double, double)
-declare double @llvm.ceil.f64(double)
-declare double @llvm.nearbyint.f64(double)
-declare double @llvm.rint.f64(double)
-declare double @llvm.trunc.f64(double)
-declare double @llvm.floor.f64(double)
-declare double @llvm.fabs.f64(double)
-declare i64 @llvm.bswap.i64(i64)
-declare i64 @llvm.ctpop.i64(i64)
-declare i64 @llvm.ctlz.i64(i64, i1)
-declare i64 @llvm.cttz.i64(i64, i1)
-
-; Basic depth-3 chain with fma
-define double @test1(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1)
- %Y2 = call double @llvm.fma.f64(double %X2, double %A2, double %C2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test1(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1.v.i2.1 = insertelement <2 x double> undef, double %C1, i32 0
-; CHECK: %Y1.v.i2.2 = insertelement <2 x double> %Y1.v.i2.1, double %C2, i32 1
-; CHECK: %Y1 = call <2 x double> @llvm.fma.v2f64(<2 x double> %X1, <2 x double> %X1.v.i0.2, <2 x double> %Y1.v.i2.2)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with fmuladd
-define double @test1a(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.fmuladd.f64(double %X1, double %A1, double %C1)
- %Y2 = call double @llvm.fmuladd.f64(double %X2, double %A2, double %C2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test1a(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1.v.i2.1 = insertelement <2 x double> undef, double %C1, i32 0
-; CHECK: %Y1.v.i2.2 = insertelement <2 x double> %Y1.v.i2.1, double %C2, i32 1
-; CHECK: %Y1 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %X1, <2 x double> %X1.v.i0.2, <2 x double> %Y1.v.i2.2)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with cos
-define double @test2(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.cos.f64(double %X1)
- %Y2 = call double @llvm.cos.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test2(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with powi
-define double @test3(double %A1, double %A2, double %B1, double %B2, i32 %P) {
-
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.powi.f64(double %X1, i32 %P)
- %Y2 = call double @llvm.powi.f64(double %X2, i32 %P)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test3(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.powi.v2f64(<2 x double> %X1, i32 %P)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with powi (different powers: should not vectorize)
-define double @test4(double %A1, double %A2, double %B1, double %B2, i32 %P) {
-
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %P2 = add i32 %P, 1
- %Y1 = call double @llvm.powi.f64(double %X1, i32 %P)
- %Y2 = call double @llvm.powi.f64(double %X2, i32 %P2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK-LABEL: @test4(
-; CHECK-NOT: <2 x double>
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with round
-define double @testround(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.round.f64(double %X1)
- %Y2 = call double @llvm.round.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testround
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.round.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with copysign
-define double @testcopysign(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.copysign.f64(double %X1, double %A1)
- %Y2 = call double @llvm.copysign.f64(double %X2, double %A1)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testcopysign
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1.v.i1.2 = shufflevector <2 x double> %X1.v.i0.1, <2 x double> undef, <2 x i32> zeroinitializer
-; CHECK: %Y1 = call <2 x double> @llvm.copysign.v2f64(<2 x double> %X1, <2 x double> %Y1.v.i1.2)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with ceil
-define double @testceil(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.ceil.f64(double %X1)
- %Y2 = call double @llvm.ceil.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testceil
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with nearbyint
-define double @testnearbyint(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.nearbyint.f64(double %X1)
- %Y2 = call double @llvm.nearbyint.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testnearbyint
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with rint
-define double @testrint(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.rint.f64(double %X1)
- %Y2 = call double @llvm.rint.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testrint
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with trunc
-define double @testtrunc(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.trunc.f64(double %X1)
- %Y2 = call double @llvm.trunc.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testtrunc
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with floor
-define double @testfloor(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.floor.f64(double %X1)
- %Y2 = call double @llvm.floor.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testfloor
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with fabs
-define double @testfabs(double %A1, double %A2, double %B1, double %B2) {
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = call double @llvm.fabs.f64(double %X1)
- %Y2 = call double @llvm.fabs.f64(double %X2)
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-; CHECK: @testfabs
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x double> @llvm.fabs.v2f64(<2 x double> %X1)
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: ret double %R
-
-}
-
-; Basic depth-3 chain with bswap
-define i64 @testbswap(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
- %X1 = sub i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
- %Y1 = call i64 @llvm.bswap.i64(i64 %X1)
- %Y2 = call i64 @llvm.bswap.i64(i64 %X2)
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
- %R = mul i64 %Z1, %Z2
- ret i64 %R
-
-; CHECK: @testbswap
-; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
-; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %X1)
-; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
-; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
-; CHECK: ret i64 %R
-
-}
-
-; Basic depth-3 chain with ctpop
-define i64 @testctpop(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
- %X1 = sub i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
- %Y1 = call i64 @llvm.ctpop.i64(i64 %X1)
- %Y2 = call i64 @llvm.ctpop.i64(i64 %X2)
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
- %R = mul i64 %Z1, %Z2
- ret i64 %R
-
-; CHECK: @testctpop
-; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
-; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %X1)
-; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
-; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
-; CHECK: ret i64 %R
-
-}
-
-; Basic depth-3 chain with ctlz
-define i64 @testctlz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
- %X1 = sub i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
- %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
- %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 true)
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
- %R = mul i64 %Z1, %Z2
- ret i64 %R
-
-; CHECK: @testctlz
-; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
-; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %X1, i1 true)
-; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
-; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
-; CHECK: ret i64 %R
-
-}
-
-; Basic depth-3 chain with ctlz (mismatched is-zero-undef flags: should not vectorize)
-define i64 @testctlzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
- %X1 = sub i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
- %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
- %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
- %R = mul i64 %Z1, %Z2
- ret i64 %R
-
-; CHECK: @testctlzneg
-; CHECK: %X1 = sub i64 %A1, %B1
-; CHECK: %X2 = sub i64 %A2, %B2
-; CHECK: %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
-; CHECK: %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
-; CHECK: %Z1 = add i64 %Y1, %B1
-; CHECK: %Z2 = add i64 %Y2, %B2
-; CHECK: %R = mul i64 %Z1, %Z2
-; CHECK: ret i64 %R
-}
-
-; Basic depth-3 chain with cttz
-define i64 @testcttz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
- %X1 = sub i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
- %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
- %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 true)
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
- %R = mul i64 %Z1, %Z2
- ret i64 %R
-
-; CHECK: @testcttz
-; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
-; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
-; CHECK: %Y1 = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %X1, i1 true)
-; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
-; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
-; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
-; CHECK: ret i64 %R
-
-}
-
-; Basic depth-3 chain with cttz (mismatched is-zero-undef flags: should not vectorize)
-define i64 @testcttzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
- %X1 = sub i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
- %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
- %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
- %R = mul i64 %Z1, %Z2
- ret i64 %R
-
-; CHECK: @testcttzneg
-; CHECK: %X1 = sub i64 %A1, %B1
-; CHECK: %X2 = sub i64 %A2, %B2
-; CHECK: %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
-; CHECK: %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
-; CHECK: %Z1 = add i64 %Y1, %B1
-; CHECK: %Z2 = add i64 %Y2, %B2
-; CHECK: %R = mul i64 %Z1, %Z2
-; CHECK: ret i64 %R
-}
-
-
-
-; CHECK: declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
-; CHECK: declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
-; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) #0
-; CHECK: declare <2 x double> @llvm.round.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) #0
-; CHECK: declare <2 x double> @llvm.ceil.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.rint.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.trunc.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.floor.v2f64(<2 x double>) #0
-; CHECK: declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #0
-; CHECK: declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) #0
-; CHECK: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) #0
-; CHECK: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0
-; CHECK: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1) #0
-; CHECK: attributes #0 = { nounwind readnone speculatable }
diff --git a/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll b/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
deleted file mode 100644
index fcc0236bae9d2..0000000000000
--- a/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
+++ /dev/null
@@ -1,134 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-aligned-only -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-AO
-
-; FIXME: re-enable this once pointer vectors work properly
-; XFAIL: *
-
-; Simple 3-pair chain also with loads and stores (using ptrs and gep)
-define double @test1(i64* %a, i64* %b, i64* %c) nounwind uwtable readonly {
-entry:
- %i0 = load i64, i64* %a, align 8
- %i1 = load i64, i64* %b, align 8
- %mul = mul i64 %i0, %i1
- %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
- %i3 = load i64, i64* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
- %i4 = load i64, i64* %arrayidx4, align 8
- %mul5 = mul i64 %i3, %i4
- %ptr = inttoptr i64 %mul to double*
- %ptr5 = inttoptr i64 %mul5 to double*
- %aptr = getelementptr inbounds double, double* %ptr, i64 2
- %aptr5 = getelementptr inbounds double, double* %ptr5, i64 3
- %av = load double, double* %aptr, align 16
- %av5 = load double, double* %aptr5, align 16
- %r = fmul double %av, %av5
- store i64 %mul, i64* %c, align 8
- %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
- store i64 %mul5, i64* %arrayidx5, align 8
- ret double %r
-; CHECK-LABEL: @test1(
-; CHECK: %i0.v.i0 = bitcast i64* %a to <2 x i64>*
-; CHECK: %i1.v.i0 = bitcast i64* %b to <2 x i64>*
-; CHECK: %i0 = load <2 x i64>, <2 x i64>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x i64>, <2 x i64>* %i1.v.i0, align 8
-; CHECK: %mul = mul <2 x i64> %i0, %i1
-; CHECK: %ptr = inttoptr <2 x i64> %mul to <2 x double*>
-; CHECK: %aptr = getelementptr inbounds double, <2 x double*> %ptr, <2 x i64> <i64 2, i64 3>
-; CHECK: %aptr.v.r1 = extractelement <2 x double*> %aptr, i32 0
-; CHECK: %aptr.v.r2 = extractelement <2 x double*> %aptr, i32 1
-; CHECK: %av = load double, double* %aptr.v.r1, align 16
-; CHECK: %av5 = load double, double* %aptr.v.r2, align 16
-; CHECK: %r = fmul double %av, %av5
-; CHECK: %0 = bitcast i64* %c to <2 x i64>*
-; CHECK: store <2 x i64> %mul, <2 x i64>* %0, align 8
-; CHECK: ret double %r
-; CHECK-AO-LABEL: @test1(
-; CHECK-AO-NOT: load <2 x
-}
-
-; Simple 3-pair chain with loads and stores (using ptrs and gep)
-define void @test2(i64** %a, i64** %b, i64** %c) nounwind uwtable readonly {
-entry:
- %i0 = load i64*, i64** %a, align 8
- %i1 = load i64*, i64** %b, align 8
- %arrayidx3 = getelementptr inbounds i64*, i64** %a, i64 1
- %i3 = load i64*, i64** %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds i64*, i64** %b, i64 1
- %i4 = load i64*, i64** %arrayidx4, align 8
- %o1 = load i64, i64* %i1, align 8
- %o4 = load i64, i64* %i4, align 8
- %ptr0 = getelementptr inbounds i64, i64* %i0, i64 %o1
- %ptr3 = getelementptr inbounds i64, i64* %i3, i64 %o4
- store i64* %ptr0, i64** %c, align 8
- %arrayidx5 = getelementptr inbounds i64*, i64** %c, i64 1
- store i64* %ptr3, i64** %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test2(
-; CHECK: %i0.v.i0 = bitcast i64** %a to <2 x i64*>*
-; CHECK: %i1 = load i64*, i64** %b, align 8
-; CHECK: %i0 = load <2 x i64*>, <2 x i64*>* %i0.v.i0, align 8
-; CHECK: %arrayidx4 = getelementptr inbounds i64*, i64** %b, i64 1
-; CHECK: %i4 = load i64*, i64** %arrayidx4, align 8
-; CHECK: %o1 = load i64, i64* %i1, align 8
-; CHECK: %o4 = load i64, i64* %i4, align 8
-; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
-; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
-; CHECK: %ptr0 = getelementptr inbounds i64, <2 x i64*> %i0, <2 x i64> %ptr0.v.i1.2
-; CHECK: %0 = bitcast i64** %c to <2 x i64*>*
-; CHECK: store <2 x i64*> %ptr0, <2 x i64*>* %0, align 8
-; CHECK: ret void
-; CHECK-AO-LABEL: @test2(
-; CHECK-AO-NOT: <2 x
-}
-
-; Simple 3-pair chain with loads and stores (using ptrs and gep)
-; using pointer vectors.
-define void @test3(<2 x i64*>* %a, <2 x i64*>* %b, <2 x i64*>* %c) nounwind uwtable readonly {
-entry:
- %i0 = load <2 x i64*>, <2 x i64*>* %a, align 8
- %i1 = load <2 x i64*>, <2 x i64*>* %b, align 8
- %arrayidx3 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %a, i64 1
- %i3 = load <2 x i64*>, <2 x i64*>* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %b, i64 1
- %i4 = load <2 x i64*>, <2 x i64*>* %arrayidx4, align 8
- %j1 = extractelement <2 x i64*> %i1, i32 0
- %j4 = extractelement <2 x i64*> %i4, i32 0
- %o1 = load i64, i64* %j1, align 8
- %o4 = load i64, i64* %j4, align 8
- %j0 = extractelement <2 x i64*> %i0, i32 0
- %j3 = extractelement <2 x i64*> %i3, i32 0
- %ptr0 = getelementptr inbounds i64, i64* %j0, i64 %o1
- %ptr3 = getelementptr inbounds i64, i64* %j3, i64 %o4
- %qtr0 = insertelement <2 x i64*> undef, i64* %ptr0, i32 0
- %rtr0 = insertelement <2 x i64*> %qtr0, i64* %ptr0, i32 1
- %qtr3 = insertelement <2 x i64*> undef, i64* %ptr3, i32 0
- %rtr3 = insertelement <2 x i64*> %qtr3, i64* %ptr3, i32 1
- store <2 x i64*> %rtr0, <2 x i64*>* %c, align 8
- %arrayidx5 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %c, i64 1
- store <2 x i64*> %rtr3, <2 x i64*>* %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test3(
-; CHECK: %i0.v.i0 = bitcast <2 x i64*>* %a to <4 x i64*>*
-; CHECK: %i1 = load <2 x i64*>, <2 x i64*>* %b, align 8
-; CHECK: %i0 = load <4 x i64*>, <4 x i64*>* %i0.v.i0, align 8
-; CHECK: %arrayidx4 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %b, i64 1
-; CHECK: %i4 = load <2 x i64*>, <2 x i64*>* %arrayidx4, align 8
-; CHECK: %j1 = extractelement <2 x i64*> %i1, i32 0
-; CHECK: %j4 = extractelement <2 x i64*> %i4, i32 0
-; CHECK: %o1 = load i64, i64* %j1, align 8
-; CHECK: %o4 = load i64, i64* %j4, align 8
-; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
-; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
-; CHECK: %ptr0.v.i0 = shufflevector <4 x i64*> %i0, <4 x i64*> undef, <2 x i32> <i32 0, i32 2>
-; CHECK: %ptr0 = getelementptr inbounds i64, <2 x i64*> %ptr0.v.i0, <2 x i64> %ptr0.v.i1.2
-; CHECK: %rtr0 = shufflevector <2 x i64*> %ptr0, <2 x i64*> undef, <2 x i32> zeroinitializer
-; CHECK: %rtr3 = shufflevector <2 x i64*> %ptr0, <2 x i64*> undef, <2 x i32> <i32 1, i32 1>
-; CHECK: %0 = bitcast <2 x i64*>* %c to <4 x i64*>*
-; CHECK: %1 = shufflevector <2 x i64*> %rtr0, <2 x i64*> %rtr3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK: store <4 x i64*> %1, <4 x i64*>* %0, align 8
-; CHECK: ret void
-; CHECK-AO-LABEL: @test3(
-; CHECK-AO-NOT: <4 x
-}
-
diff --git a/test/Transforms/BBVectorize/simple-ldstr.ll b/test/Transforms/BBVectorize/simple-ldstr.ll
deleted file mode 100644
index 56c1a06b42eac..0000000000000
--- a/test/Transforms/BBVectorize/simple-ldstr.ll
+++ /dev/null
@@ -1,170 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-aligned-only -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-AO
-
-; Simple 3-pair chain with loads and stores
-define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
-entry:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4
- store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
- store double %mul5, double* %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test1(
-; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
-; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
-; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %0 = bitcast double* %c to <2 x double>*
-; CHECK: store <2 x double> %mul, <2 x double>* %0, align 8
-; CHECK: ret void
-; CHECK-AO-LABEL: @test1(
-; CHECK-AO-NOT: <2 x double>
-}
-
-; Simple chain with extending loads and stores
-define void @test2(float* %a, float* %b, double* %c) nounwind uwtable readonly {
-entry:
- %i0f = load float, float* %a, align 4
- %i0 = fpext float %i0f to double
- %i1f = load float, float* %b, align 4
- %i1 = fpext float %i1f to double
- %mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds float, float* %a, i64 1
- %i3f = load float, float* %arrayidx3, align 4
- %i3 = fpext float %i3f to double
- %arrayidx4 = getelementptr inbounds float, float* %b, i64 1
- %i4f = load float, float* %arrayidx4, align 4
- %i4 = fpext float %i4f to double
- %mul5 = fmul double %i3, %i4
- store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
- store double %mul5, double* %arrayidx5, align 8
- ret void
-; CHECK-LABEL: @test2(
-; CHECK: %i0f.v.i0 = bitcast float* %a to <2 x float>*
-; CHECK: %i1f.v.i0 = bitcast float* %b to <2 x float>*
-; CHECK: %i0f = load <2 x float>, <2 x float>* %i0f.v.i0, align 4
-; CHECK: %i0 = fpext <2 x float> %i0f to <2 x double>
-; CHECK: %i1f = load <2 x float>, <2 x float>* %i1f.v.i0, align 4
-; CHECK: %i1 = fpext <2 x float> %i1f to <2 x double>
-; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %0 = bitcast double* %c to <2 x double>*
-; CHECK: store <2 x double> %mul, <2 x double>* %0, align 8
-; CHECK: ret void
-; CHECK-AO-LABEL: @test2(
-; CHECK-AO-NOT: <2 x double>
-}
-
-; Simple chain with loads and truncating stores
-define void @test3(double* %a, double* %b, float* %c) nounwind uwtable readonly {
-entry:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1
- %mulf = fptrunc double %mul to float
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4
- %mul5f = fptrunc double %mul5 to float
- store float %mulf, float* %c, align 8
- %arrayidx5 = getelementptr inbounds float, float* %c, i64 1
- store float %mul5f, float* %arrayidx5, align 4
- ret void
-; CHECK-LABEL: @test3(
-; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
-; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
-; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %mulf = fptrunc <2 x double> %mul to <2 x float>
-; CHECK: %0 = bitcast float* %c to <2 x float>*
-; CHECK: store <2 x float> %mulf, <2 x float>* %0, align 8
-; CHECK: ret void
-; CHECK-AO-LABEL: @test3(
-; CHECK-AO: %i0 = load double, double* %a, align 8
-; CHECK-AO: %i1 = load double, double* %b, align 8
-; CHECK-AO: %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
-; CHECK-AO: %i3 = load double, double* %arrayidx3, align 8
-; CHECK-AO: %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
-; CHECK-AO: %i4 = load double, double* %arrayidx4, align 8
-; CHECK-AO: %mul.v.i1.1 = insertelement <2 x double> undef, double %i1, i32 0
-; CHECK-AO: %mul.v.i1.2 = insertelement <2 x double> %mul.v.i1.1, double %i4, i32 1
-; CHECK-AO: %mul.v.i0.1 = insertelement <2 x double> undef, double %i0, i32 0
-; CHECK-AO: %mul.v.i0.2 = insertelement <2 x double> %mul.v.i0.1, double %i3, i32 1
-; CHECK-AO: %mul = fmul <2 x double> %mul.v.i0.2, %mul.v.i1.2
-; CHECK-AO: %mulf = fptrunc <2 x double> %mul to <2 x float>
-; CHECK-AO: %0 = bitcast float* %c to <2 x float>*
-; CHECK-AO: store <2 x float> %mulf, <2 x float>* %0, align 8
-; CHECK-AO: ret void
-}
-
-; Simple 3-pair chain with loads and stores (unreachable)
-define void @test4(i1 %bool, double* %a, double* %b, double* %c) nounwind uwtable readonly {
-entry:
- br i1 %bool, label %if.then1, label %if.end
-
-if.then1:
- unreachable
- br label %if.then
-
-if.then:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4
- store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
- store double %mul5, double* %arrayidx5, align 8
- br label %if.end
-
-if.end:
- ret void
-; CHECK-LABEL: @test4(
-; CHECK-NOT: <2 x double>
-; CHECK-AO-LABEL: @test4(
-; CHECK-AO-NOT: <2 x double>
-}
-
-; Simple 3-pair chain with loads and stores
-define void @test5(double* %a, double* %b, double* %c) nounwind uwtable readonly {
-entry:
- %i0 = load double, double* %a, align 8
- %i1 = load double, double* %b, align 8
- %mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double, double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double, double* %arrayidx4, align 8
- %mul5 = fmul double %i3, %i4
- %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
- store double %mul5, double* %arrayidx5, align 8
- store double %mul, double* %c, align 4
- ret void
-; CHECK-LABEL: @test5(
-; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
-; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
-; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %0 = bitcast double* %c to <2 x double>*
-; CHECK: store <2 x double> %mul, <2 x double>* %0, align 4
-; CHECK: ret void
-; CHECK-AO-LABEL: @test5(
-; CHECK-AO-NOT: <2 x double>
-}
-
diff --git a/test/Transforms/BBVectorize/simple-sel.ll b/test/Transforms/BBVectorize/simple-sel.ll
deleted file mode 100644
index 269b07f82d194..0000000000000
--- a/test/Transforms/BBVectorize/simple-sel.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-no-bools -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-NB
-
-; Basic depth-3 chain with select
-define double @test1(double %A1, double %A2, double %B1, double %B2, i1 %C1, i1 %C2) {
-; CHECK-LABEL: @test1(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z1 = select i1 %C1, double %Y1, double %B1
- %Z2 = select i1 %C2, double %Y2, double %B2
-; CHECK: %Z1.v.i0.1 = insertelement <2 x i1> undef, i1 %C1, i32 0
-; CHECK: %Z1.v.i0.2 = insertelement <2 x i1> %Z1.v.i0.1, i1 %C2, i32 1
-; CHECK: %Z1 = select <2 x i1> %Z1.v.i0.2, <2 x double> %Y1, <2 x double> %X1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain with select (and vect. compare)
-define double @test2(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test2(
-; CHECK-NB-LABEL: @test2(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %C1 = fcmp ogt double %X1, %A1
- %C2 = fcmp ogt double %X2, %A2
-; CHECK: %C1 = fcmp ogt <2 x double> %X1, %X1.v.i0.2
-; CHECK-NB: fcmp ogt double
- %Z1 = select i1 %C1, double %Y1, double %B1
- %Z2 = select i1 %C2, double %Y2, double %B2
-; CHECK: %Z1 = select <2 x i1> %C1, <2 x double> %Y1, <2 x double> %X1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
diff --git a/test/Transforms/BBVectorize/simple-tst.ll b/test/Transforms/BBVectorize/simple-tst.ll
deleted file mode 100644
index 6a88e1b09c1b6..0000000000000
--- a/test/Transforms/BBVectorize/simple-tst.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-linux"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-vector-bits=256 -instcombine -gvn -S | FileCheck %s
-
-; Basic depth-3 chain (target-specific type should not vectorize)
-define ppc_fp128 @test7(ppc_fp128 %A1, ppc_fp128 %A2, ppc_fp128 %B1, ppc_fp128 %B2) {
-; CHECK-LABEL: @test7(
-; CHECK-NOT: <2 x ppc_fp128>
- %X1 = fsub ppc_fp128 %A1, %B1
- %X2 = fsub ppc_fp128 %A2, %B2
- %Y1 = fmul ppc_fp128 %X1, %A1
- %Y2 = fmul ppc_fp128 %X2, %A2
- %Z1 = fadd ppc_fp128 %Y1, %B1
- %Z2 = fadd ppc_fp128 %Y2, %B2
- %R = fmul ppc_fp128 %Z1, %Z2
- ret ppc_fp128 %R
-}
-
diff --git a/test/Transforms/BBVectorize/simple.ll b/test/Transforms/BBVectorize/simple.ll
deleted file mode 100644
index 0fe33f17a6460..0000000000000
--- a/test/Transforms/BBVectorize/simple.ll
+++ /dev/null
@@ -1,199 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-
-; Basic depth-3 chain
-define double @test1(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test1(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain (last pair permuted)
-define double @test2(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test2(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z1 = fadd double %Y2, %B1
- %Z2 = fadd double %Y1, %B2
-; CHECK: %Z1.v.i1.1 = insertelement <2 x double> undef, double %B2, i32 0
-; CHECK: %Z1.v.i1.2 = insertelement <2 x double> %Z1.v.i1.1, double %B1, i32 1
-; CHECK: %Z2 = fadd <2 x double> %Y1, %Z1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z2.v.r1 = extractelement <2 x double> %Z2, i32 0
-; CHECK: %Z2.v.r2 = extractelement <2 x double> %Z2, i32 1
-; CHECK: %R = fmul double %Z2.v.r2, %Z2.v.r1
- ret double %R
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain (last pair first splat)
-define double @test3(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test3(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z1 = fadd double %Y2, %B1
- %Z2 = fadd double %Y2, %B2
-; CHECK: %Z1.v.i0 = shufflevector <2 x double> %Y1, <2 x double> undef, <2 x i32> <i32 1, i32 1>
-; CHECK: %Z1 = fadd <2 x double> %Z1.v.i0, %X1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain (last pair second splat)
-define double @test4(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test4(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y1, %B2
-; CHECK: %Z1.v.i0 = shufflevector <2 x double> %Y1, <2 x double> undef, <2 x i32> zeroinitializer
-; CHECK: %Z1 = fadd <2 x double> %Z1.v.i0, %X1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain
-define <2 x float> @test5(<2 x float> %A1, <2 x float> %A2, <2 x float> %B1, <2 x float> %B2) {
-; CHECK-LABEL: @test5(
-; CHECK: %X1.v.i1 = shufflevector <2 x float> %B1, <2 x float> %B2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK: %X1.v.i0 = shufflevector <2 x float> %A1, <2 x float> %A2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %X1 = fsub <2 x float> %A1, %B1
- %X2 = fsub <2 x float> %A2, %B2
-; CHECK: %X1 = fsub <4 x float> %X1.v.i0, %X1.v.i1
- %Y1 = fmul <2 x float> %X1, %A1
- %Y2 = fmul <2 x float> %X2, %A2
-; CHECK: %Y1 = fmul <4 x float> %X1, %X1.v.i0
- %Z1 = fadd <2 x float> %Y1, %B1
- %Z2 = fadd <2 x float> %Y2, %B2
-; CHECK: %Z1 = fadd <4 x float> %Y1, %X1.v.i1
- %R = fmul <2 x float> %Z1, %Z2
-; CHECK: %Z1.v.r1 = shufflevector <4 x float> %Z1, <4 x float> undef, <2 x i32> <i32 0, i32 1>
-; CHECK: %Z1.v.r2 = shufflevector <4 x float> %Z1, <4 x float> undef, <2 x i32> <i32 2, i32 3>
-; CHECK: %R = fmul <2 x float> %Z1.v.r1, %Z1.v.r2
- ret <2 x float> %R
-; CHECK: ret <2 x float> %R
-}
-
-; Basic chain with shuffles
-define <8 x i8> @test6(<8 x i8> %A1, <8 x i8> %A2, <8 x i8> %B1, <8 x i8> %B2) {
-; CHECK-LABEL: @test6(
-; CHECK: %X1.v.i1 = shufflevector <8 x i8> %B1, <8 x i8> %B2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK: %X1.v.i0 = shufflevector <8 x i8> %A1, <8 x i8> %A2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %X1 = sub <8 x i8> %A1, %B1
- %X2 = sub <8 x i8> %A2, %B2
-; CHECK: %X1 = sub <16 x i8> %X1.v.i0, %X1.v.i1
- %Y1 = mul <8 x i8> %X1, %A1
- %Y2 = mul <8 x i8> %X2, %A2
-; CHECK: %Y1 = mul <16 x i8> %X1, %X1.v.i0
- %Z1 = add <8 x i8> %Y1, %B1
- %Z2 = add <8 x i8> %Y2, %B2
-; CHECK: %Z1 = add <16 x i8> %Y1, %X1.v.i1
- %Q1 = shufflevector <8 x i8> %Z1, <8 x i8> %Z2, <8 x i32> <i32 15, i32 8, i32 6, i32 1, i32 13, i32 10, i32 4, i32 3>
- %Q2 = shufflevector <8 x i8> %Z2, <8 x i8> %Z2, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 4, i32 4, i32 1>
-; CHECK: %Q1.v.i1 = shufflevector <16 x i8> %Z1, <16 x i8> undef, <16 x i32> <i32 8, i32 undef, i32 10, i32 undef, i32 undef, i32 13, i32 undef, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK: %Q1 = shufflevector <16 x i8> %Z1, <16 x i8> %Q1.v.i1, <16 x i32> <i32 23, i32 16, i32 6, i32 1, i32 21, i32 18, i32 4, i32 3, i32 14, i32 15, i32 8, i32 9, i32 10, i32 12, i32 12, i32 9>
- %R = mul <8 x i8> %Q1, %Q2
-; CHECK: %Q1.v.r1 = shufflevector <16 x i8> %Q1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK: %Q1.v.r2 = shufflevector <16 x i8> %Q1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK: %R = mul <8 x i8> %Q1.v.r1, %Q1.v.r2
- ret <8 x i8> %R
-; CHECK: ret <8 x i8> %R
-}
-
-; Basic depth-3 chain (flipped order)
-define double @test7(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test7(
-; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
-; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
-; CHECK: %Y1 = fmul <2 x double> %X1, %X1.v.i0.2
- %Z2 = fadd double %Y2, %B2
- %Z1 = fadd double %Y1, %B1
-; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
- %R = fmul double %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
-; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
- ret double %R
-; CHECK: ret double %R
-}
-
-; Basic depth-3 chain (subclass data)
-define i64 @test8(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
-; CHECK-LABEL: @test8(
-; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
-; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
-; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
-; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
- %X1 = sub nsw i64 %A1, %B1
- %X2 = sub i64 %A2, %B2
-; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
- %Y1 = mul i64 %X1, %A1
- %Y2 = mul i64 %X2, %A2
-; CHECK: %Y1 = mul <2 x i64> %X1, %X1.v.i0.2
- %Z1 = add i64 %Y1, %B1
- %Z2 = add i64 %Y2, %B2
-; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
- %R = mul i64 %Z1, %Z2
-; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
-; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
- ret i64 %R
-; CHECK: ret i64 %R
-}
-
diff --git a/test/Transforms/BBVectorize/simple3.ll b/test/Transforms/BBVectorize/simple3.ll
deleted file mode 100644
index 6edf7f07ac1d8..0000000000000
--- a/test/Transforms/BBVectorize/simple3.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-vector-bits=192 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-
-; Basic depth-3 chain
-define double @test1(double %A1, double %A2, double %A3, double %B1, double %B2, double %B3) {
-; CHECK-LABEL: @test1(
-; CHECK: %X1.v.i1.11 = insertelement <3 x double> undef, double %B1, i32 0
-; CHECK: %X1.v.i1.22 = insertelement <3 x double> %X1.v.i1.11, double %B2, i32 1
-; CHECK: %X1.v.i1 = insertelement <3 x double> %X1.v.i1.22, double %B3, i32 2
-; CHECK: %X1.v.i0.13 = insertelement <3 x double> undef, double %A1, i32 0
-; CHECK: %X1.v.i0.24 = insertelement <3 x double> %X1.v.i0.13, double %A2, i32 1
-; CHECK: %X1.v.i0 = insertelement <3 x double> %X1.v.i0.24, double %A3, i32 2
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %X3 = fsub double %A3, %B3
-; CHECK: %X1 = fsub <3 x double> %X1.v.i0, %X1.v.i1
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %Y3 = fmul double %X3, %A3
-; CHECK: %Y1 = fmul <3 x double> %X1, %X1.v.i0
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %Z3 = fadd double %Y3, %B3
-; CHECK: %Z1 = fadd <3 x double> %Y1, %X1.v.i1
- %R1 = fmul double %Z1, %Z2
- %R = fmul double %R1, %Z3
-; CHECK: %Z1.v.r210 = extractelement <3 x double> %Z1, i32 2
-; CHECK: %Z1.v.r1 = extractelement <3 x double> %Z1, i32 0
-; CHECK: %Z1.v.r2 = extractelement <3 x double> %Z1, i32 1
-; CHECK: %R1 = fmul double %Z1.v.r1, %Z1.v.r2
-; CHECK: %R = fmul double %R1, %Z1.v.r210
- ret double %R
-; CHECK: ret double %R
-}
-
diff --git a/test/Transforms/BBVectorize/vector-sel.ll b/test/Transforms/BBVectorize/vector-sel.ll
deleted file mode 100644
index cb775ceae6957..0000000000000
--- a/test/Transforms/BBVectorize/vector-sel.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: opt < %s -bb-vectorize -S | FileCheck %s
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-@d = external global [1 x [10 x [1 x i16]]], align 16
-
-;CHECK-LABEL: @test
-;CHECK: %0 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>
-;CHECK: %1 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>
-;CHECK: %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-;CHECK: %3 = shufflevector <4 x i1> %boolvec, <4 x i1> %boolvec, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-;CHECK: %4 = select <8 x i1> %3, <8 x i16> <i16 -3, i16 -3, i16 -3, i16 -3, i16 -3, i16 -3, i16 -3, i16 -3>, <8 x i16> %2
-define void @test() {
-entry:
- %bool = icmp ne i32 undef, 0
- %boolvec = icmp ne <4 x i32> undef, zeroinitializer
- br label %body
-
-body:
- %0 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>
- %1 = select i1 %bool, <4 x i16> <i16 -2, i16 -2, i16 -2, i16 -2>, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>
- %2 = select <4 x i1> %boolvec, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>, <4 x i16> %0
- %3 = select <4 x i1> %boolvec, <4 x i16> <i16 -3, i16 -3, i16 -3, i16 -3>, <4 x i16> %1
- %4 = add nsw <4 x i16> %2, zeroinitializer
- %5 = add nsw <4 x i16> %3, zeroinitializer
- %6 = getelementptr inbounds [1 x [10 x [1 x i16]]], [1 x [10 x [1 x i16]]]* @d, i64 0, i64 0, i64 undef, i64 0
- %7 = bitcast i16* %6 to <4 x i16>*
- store <4 x i16> %4, <4 x i16>* %7, align 2
- %8 = getelementptr [1 x [10 x [1 x i16]]], [1 x [10 x [1 x i16]]]* @d, i64 0, i64 0, i64 undef, i64 4
- %9 = bitcast i16* %8 to <4 x i16>*
- store <4 x i16> %5, <4 x i16>* %9, align 2
- ret void
-}
diff --git a/test/Transforms/BBVectorize/xcore/no-vector-registers.ll b/test/Transforms/BBVectorize/xcore/no-vector-registers.ll
deleted file mode 100644
index 9ebdb7368a35c..0000000000000
--- a/test/Transforms/BBVectorize/xcore/no-vector-registers.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S -mtriple=xcore | FileCheck %s
-
-target datalayout = "e-p:32:32:32-a0:0:32-n32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f16:16:32-f32:32:32-f64:32:32"
-target triple = "xcore"
-
-; Basic depth-3 chain
-define double @test1(double %A1, double %A2, double %B1, double %B2) {
-; CHECK-LABEL: @test1(
-; CHECK-NOT: <2 x double>
- %X1 = fsub double %A1, %B1
- %X2 = fsub double %A2, %B2
- %Y1 = fmul double %X1, %A1
- %Y2 = fmul double %X2, %A2
- %Z1 = fadd double %Y1, %B1
- %Z2 = fadd double %Y2, %B2
- %R = fmul double %Z1, %Z2
- ret double %R
-}
diff --git a/test/Transforms/CodeExtractor/BlockAddressReference.ll b/test/Transforms/CodeExtractor/BlockAddressReference.ll
new file mode 100644
index 0000000000000..91f85bf3ed875
--- /dev/null
+++ b/test/Transforms/CodeExtractor/BlockAddressReference.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -loop-extract -S | FileCheck %s
+
+@label = common local_unnamed_addr global i8* null
+
+; CHECK: define
+; no outlined function
+; CHECK-NOT: define
+define i32 @sterix(i32 %n) {
+entry:
+ %tobool = icmp ne i32 %n, 0
+ ; this blockaddress references a basic block that goes in the extracted loop
+ %cond = select i1 %tobool, i8* blockaddress(@sterix, %for.cond), i8* blockaddress(@sterix, %exit)
+ store i8* %cond, i8** @label
+ %cmp5 = icmp sgt i32 %n, 0
+ br i1 %cmp5, label %for.body, label %exit
+
+for.cond:
+ %mul = shl nsw i32 %s.06, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %exit.loopexit, label %for.body
+
+for.body:
+ %i.07 = phi i32 [ %inc, %for.cond ], [ 0, %entry ]
+ %s.06 = phi i32 [ %mul, %for.cond ], [ 1, %entry ]
+ %inc = add nuw nsw i32 %i.07, 1
+ br label %for.cond
+
+exit.loopexit:
+ %phitmp = icmp ne i32 %s.06, 2
+ %phitmp8 = zext i1 %phitmp to i32
+ br label %exit
+
+exit:
+ %s.1 = phi i32 [ 1, %entry ], [ %phitmp8, %exit.loopexit ]
+ ret i32 %s.1
+}
diff --git a/test/Transforms/CodeExtractor/BlockAddressSelfReference.ll b/test/Transforms/CodeExtractor/BlockAddressSelfReference.ll
new file mode 100644
index 0000000000000..7d5a827a358aa
--- /dev/null
+++ b/test/Transforms/CodeExtractor/BlockAddressSelfReference.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -loop-extract -S | FileCheck %s
+
+@choum.addr = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@choum, %bb10), i8* blockaddress(@choum, %bb14), i8* blockaddress(@choum, %bb18)]
+
+; CHECK: define
+; no outlined function
+; CHECK-NOT: define
+
+define void @choum(i32 %arg, i32* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = icmp sgt i32 %arg, 0
+ br i1 %tmp, label %bb3, label %bb24
+
+bb3: ; preds = %bb
+ %tmp4 = sext i32 %arg2 to i64
+ %tmp5 = getelementptr inbounds [3 x i8*], [3 x i8*]* @choum.addr, i64 0, i64 %tmp4
+ %tmp6 = load i8*, i8** %tmp5
+ %tmp7 = zext i32 %arg to i64
+ br label %bb8
+
+bb8: ; preds = %bb18, %bb3
+ %tmp9 = phi i64 [ 0, %bb3 ], [ %tmp22, %bb18 ]
+ indirectbr i8* %tmp6, [label %bb10, label %bb14, label %bb18]
+
+bb10: ; preds = %bb8
+ %tmp11 = getelementptr inbounds i32, i32* %arg1, i64 %tmp9
+ %tmp12 = load i32, i32* %tmp11
+ %tmp13 = add nsw i32 %tmp12, 1
+ store i32 %tmp13, i32* %tmp11
+ br label %bb14
+
+bb14: ; preds = %bb10, %bb8
+ %tmp15 = getelementptr inbounds i32, i32* %arg1, i64 %tmp9
+ %tmp16 = load i32, i32* %tmp15
+ %tmp17 = shl nsw i32 %tmp16, 1
+ store i32 %tmp17, i32* %tmp15
+ br label %bb18
+
+bb18: ; preds = %bb14, %bb8
+ %tmp19 = getelementptr inbounds i32, i32* %arg1, i64 %tmp9
+ %tmp20 = load i32, i32* %tmp19
+ %tmp21 = add nsw i32 %tmp20, -3
+ store i32 %tmp21, i32* %tmp19
+ %tmp22 = add nuw nsw i64 %tmp9, 1
+ %tmp23 = icmp eq i64 %tmp22, %tmp7
+ br i1 %tmp23, label %bb24, label %bb8
+
+bb24: ; preds = %bb18, %bb
+ ret void
+}
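Both CodeExtractor tests above hinge on the same invariant: a blockaddress constant is tied to the function that owns the referenced block, so outlining that block would leave the constant pointing into a different function, which is invalid IR. A minimal self-contained illustration of the constraint (function and global names are illustrative, not from the tests):

@tbl = constant i8* blockaddress(@f, %target)

define void @f() {
entry:
  br label %target
target:                   ; must stay in @f while @tbl refers to it
  ret void
}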
diff --git a/test/Transforms/CodeGenPrepare/X86/memcmp.ll b/test/Transforms/CodeGenPrepare/X86/memcmp.ll
index 690e714af2610..2435cd7d0a830 100644
--- a/test/Transforms/CodeGenPrepare/X86/memcmp.ll
+++ b/test/Transforms/CodeGenPrepare/X86/memcmp.ll
@@ -4,47 +4,18 @@
declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)
define i32 @cmp2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; X32-LABEL: @cmp2(
-; X32-NEXT: loadbb:
-; X32-NEXT: [[TMP0:%.*]] = bitcast i8* %x to i16*
-; X32-NEXT: [[TMP1:%.*]] = bitcast i8* %y to i16*
-; X32-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP0]]
-; X32-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
-; X32-NEXT: [[TMP4:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; X32-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
-; X32-NEXT: [[TMP6:%.*]] = zext i16 [[TMP4]] to i32
-; X32-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32
-; X32-NEXT: [[TMP8:%.*]] = sub i32 [[TMP6]], [[TMP7]]
-; X32-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
-; X32-NEXT: br i1 [[TMP9]], label %res_block, label %endblock
-; X32: res_block:
-; X32-NEXT: [[TMP10:%.*]] = icmp ult i32 [[TMP6]], [[TMP7]]
-; X32-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i32 -1, i32 1
-; X32-NEXT: br label %endblock
-; X32: endblock:
-; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, %loadbb ], [ [[TMP11]], %res_block ]
-; X32-NEXT: ret i32 [[PHI_RES]]
-;
-; X64-LABEL: @cmp2(
-; X64-NEXT: loadbb:
-; X64-NEXT: [[TMP0:%.*]] = bitcast i8* %x to i16*
-; X64-NEXT: [[TMP1:%.*]] = bitcast i8* %y to i16*
-; X64-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP0]]
-; X64-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
-; X64-NEXT: [[TMP4:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; X64-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
-; X64-NEXT: [[TMP6:%.*]] = zext i16 [[TMP4]] to i64
-; X64-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i64
-; X64-NEXT: [[TMP8:%.*]] = sub i64 [[TMP6]], [[TMP7]]
-; X64-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP8]], 0
-; X64-NEXT: br i1 [[TMP9]], label %res_block, label %endblock
-; X64: res_block:
-; X64-NEXT: [[TMP10:%.*]] = icmp ult i64 [[TMP6]], [[TMP7]]
-; X64-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i32 -1, i32 1
-; X64-NEXT: br label %endblock
-; X64: endblock:
-; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, %loadbb ], [ [[TMP11]], %res_block ]
-; X64-NEXT: ret i32 [[PHI_RES]]
+; ALL-LABEL: @cmp2(
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i16*
+; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i16*
+; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
+; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
+; ALL-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; ALL-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
+; ALL-NEXT: [[TMP7:%.*]] = icmp ne i16 [[TMP5]], [[TMP6]]
+; ALL-NEXT: [[TMP8:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
+; ALL-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 -1, i32 1
+; ALL-NEXT: [[TMP10:%.*]] = select i1 [[TMP7]], i32 [[TMP9]], i32 0
+; ALL-NEXT: ret i32 [[TMP10]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 2)
ret i32 %call
@@ -60,45 +31,18 @@ define i32 @cmp3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; X32-LABEL: @cmp4(
-; X32-NEXT: loadbb:
-; X32-NEXT: [[TMP0:%.*]] = bitcast i8* %x to i32*
-; X32-NEXT: [[TMP1:%.*]] = bitcast i8* %y to i32*
-; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
-; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
-; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
-; X32-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]]
-; X32-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0
-; X32-NEXT: br i1 [[TMP7]], label %res_block, label %endblock
-; X32: res_block:
-; X32-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP4]], [[TMP5]]
-; X32-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 -1, i32 1
-; X32-NEXT: br label %endblock
-; X32: endblock:
-; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, %loadbb ], [ [[TMP9]], %res_block ]
-; X32-NEXT: ret i32 [[PHI_RES]]
-;
-; X64-LABEL: @cmp4(
-; X64-NEXT: loadbb:
-; X64-NEXT: [[TMP0:%.*]] = bitcast i8* %x to i32*
-; X64-NEXT: [[TMP1:%.*]] = bitcast i8* %y to i32*
-; X64-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
-; X64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
-; X64-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; X64-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
-; X64-NEXT: [[TMP6:%.*]] = zext i32 [[TMP4]] to i64
-; X64-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
-; X64-NEXT: [[TMP8:%.*]] = sub i64 [[TMP6]], [[TMP7]]
-; X64-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP8]], 0
-; X64-NEXT: br i1 [[TMP9]], label %res_block, label %endblock
-; X64: res_block:
-; X64-NEXT: [[TMP10:%.*]] = icmp ult i64 [[TMP6]], [[TMP7]]
-; X64-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i32 -1, i32 1
-; X64-NEXT: br label %endblock
-; X64: endblock:
-; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, %loadbb ], [ [[TMP11]], %res_block ]
-; X64-NEXT: ret i32 [[PHI_RES]]
+; ALL-LABEL: @cmp4(
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
+; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i32*
+; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]])
+; ALL-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 -1, i32 1
+; ALL-NEXT: [[TMP10:%.*]] = select i1 [[TMP7]], i32 [[TMP9]], i32 0
+; ALL-NEXT: ret i32 [[TMP10]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
ret i32 %call
@@ -137,23 +81,17 @@ define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-NEXT: ret i32 [[CALL]]
;
; X64-LABEL: @cmp8(
-; X64-NEXT: loadbb:
-; X64-NEXT: [[TMP0:%.*]] = bitcast i8* %x to i64*
-; X64-NEXT: [[TMP1:%.*]] = bitcast i8* %y to i64*
-; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i64*
+; X64-NEXT: [[TMP2:%.*]] = bitcast i8* %y to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
-; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
-; X64-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-; X64-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP6]], 0
-; X64-NEXT: br i1 [[TMP7]], label %res_block, label %endblock
-; X64: res_block:
-; X64-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: [[TMP6:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]])
+; X64-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP5]], [[TMP6]]
+; X64-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 -1, i32 1
-; X64-NEXT: br label %endblock
-; X64: endblock:
-; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, %loadbb ], [ [[TMP9]], %res_block ]
-; X64-NEXT: ret i32 [[PHI_RES]]
+; X64-NEXT: [[TMP10:%.*]] = select i1 [[TMP7]], i32 [[TMP9]], i32 0
+; X64-NEXT: ret i32 [[TMP10]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
ret i32 %call
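For reference, a minimal hand-written sketch of the straight-line expansion that the new ALL/X64 checks above describe: one load per side, a byte swap to restore memcmp's big-endian byte order, then two compares feeding two selects instead of the old loadbb/res_block/endblock CFG. Function and value names are illustrative:

declare i16 @llvm.bswap.i16(i16)

define i32 @cmp2_expanded(i8* %x, i8* %y) {
  %px = bitcast i8* %x to i16*
  %py = bitcast i8* %y to i16*
  %lx = load i16, i16* %px
  %ly = load i16, i16* %py
  %bx = call i16 @llvm.bswap.i16(i16 %lx)   ; big-endian order so ult matches bytewise memcmp
  %by = call i16 @llvm.bswap.i16(i16 %ly)
  %ne = icmp ne i16 %bx, %by
  %lt = icmp ult i16 %bx, %by
  %sign = select i1 %lt, i32 -1, i32 1      ; -1 if x < y, else 1
  %r = select i1 %ne, i32 %sign, i32 0      ; 0 if equal
  ret i32 %r
}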
diff --git a/test/Transforms/CodeGenPrepare/nonintegral.ll b/test/Transforms/CodeGenPrepare/nonintegral.ll
new file mode 100644
index 0000000000000..06554cc1c9eea
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/nonintegral.ll
@@ -0,0 +1,68 @@
+; RUN: opt -S -codegenprepare < %s | FileCheck %s
+; RUN: opt -S -codegenprepare -addr-sink-using-gep=false < %s | FileCheck %s
+
+; This target data layout is modified to have a non-integral addrspace(1),
+; in order to verify that codegenprepare does not try to introduce illegal
+; inttoptrs.
+target datalayout =
+"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-ni:1"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @test_simple(i1 %cond, i64 addrspace(1)* %base) {
+; CHECK-LABEL: @test_simple
+; CHECK-NOT: inttoptr {{.*}} to i64 addrspace(1)*
+entry:
+ %addr = getelementptr inbounds i64, i64 addrspace(1)* %base, i64 5
+ %casted = bitcast i64 addrspace(1)* %addr to i32 addrspace(1)*
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ %v = load i32, i32 addrspace(1)* %casted, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
+
+
+define void @test_inttoptr_base(i1 %cond, i64 %base) {
+; CHECK-LABEL: @test_inttoptr_base
+; CHECK-NOT: inttoptr {{.*}} to i64 addrspace(1)*
+entry:
+; Doing the inttoptr in the integral addrspace(0) followed by an explicit
+; (frontend-introduced) addrspacecast is fine. We cannot, however, introduce
+; a direct inttoptr to addrspace(1).
+ %baseptr = inttoptr i64 %base to i64*
+ %baseptrni = addrspacecast i64 *%baseptr to i64 addrspace(1)*
+ %addr = getelementptr inbounds i64, i64 addrspace(1)* %baseptrni, i64 5
+ %casted = bitcast i64 addrspace(1)* %addr to i32 addrspace(1)*
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ %v = load i32, i32 addrspace(1)* %casted, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
+
+define void @test_ptrtoint_base(i1 %cond, i64 addrspace(1)* %base) {
+; CHECK-LABEL: @test_ptrtoint_base
+; CHECK-NOT: ptrtoint addrspace(1)* {{.*}} to i64
+entry:
+; This one is inserted by the frontend, so it's fine. We're not allowed to
+; directly ptrtoint %base ourselves, though.
+ %baseptr0 = addrspacecast i64 addrspace(1)* %base to i64*
+ %toint = ptrtoint i64* %baseptr0 to i64
+ %added = add i64 %toint, 8
+ %toptr = inttoptr i64 %added to i64*
+ %geped = getelementptr i64, i64* %toptr, i64 2
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ %v = load i64, i64* %geped, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
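The distinction the three tests above draw reduces to one rule: for a non-integral address space (the "-ni:1" suffix in the datalayout), integer/pointer conversions may only exist where the frontend wrote them; the optimizer must route any address computation it sinks through the existing casts rather than minting new inttoptr/ptrtoint instructions in addrspace(1). A sketch of the legal frontend-emitted pattern, assuming the same "-ni:1" layout (names illustrative):

define i64 addrspace(1)* @legal_cast(i64 %bits) {
  ; convert in the integral addrspace(0), then cast across
  %p0 = inttoptr i64 %bits to i64*
  %p1 = addrspacecast i64* %p0 to i64 addrspace(1)*
  ret i64 addrspace(1)* %p1
}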
diff --git a/test/Transforms/ConstantHoisting/ARM/gep-struct-index.ll b/test/Transforms/ConstantHoisting/ARM/gep-struct-index.ll
new file mode 100644
index 0000000000000..45f4500b37c17
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/ARM/gep-struct-index.ll
@@ -0,0 +1,37 @@
+; RUN: opt -consthoist -S < %s | FileCheck %s
+target triple = "thumbv6m-none-eabi"
+
+%T = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
+i32, i32, i32, i32, i32, i32 }
+
+; Indices for GEPs that index into a struct type should not be hoisted.
+define i32 @test1(%T* %P) nounwind {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 256 to i32
+; CHECK: %addr1 = getelementptr %T, %T* %P, i32 %const, i32 256
+; CHECK: %addr2 = getelementptr %T, %T* %P, i32 %const, i32 256
+; The first index into the pointer is hoisted, but the second one into the
+; struct isn't.
+ %addr1 = getelementptr %T, %T* %P, i32 256, i32 256
+ %tmp1 = load i32, i32* %addr1
+ %addr2 = getelementptr %T, %T* %P, i32 256, i32 256
+ %tmp2 = load i32, i32* %addr2
+ %tmp4 = add i32 %tmp1, %tmp2
+ ret i32 %tmp4
+}
+
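For context, the reason the struct index cannot be hoisted: in a GEP, an index that steps into a struct must be a literal i32 constant, because it names a field rather than scaling a runtime offset; only the leading pointer-arithmetic index may be a register such as the hoisted %const. A minimal sketch of what the IR verifier accepts (hypothetical type and names):

%S = type { i32, i32 }

define i32* @field_addr(%S* %P, i32 %n) {
  ; the first index may vary at runtime; the field index (1) must stay literal
  %a = getelementptr %S, %S* %P, i32 %n, i32 1
  ret i32* %a
}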
diff --git a/test/Transforms/Inline/AArch64/inline-target-attr.ll b/test/Transforms/Inline/AArch64/inline-target-attr.ll
new file mode 100644
index 0000000000000..af87ff6e7404d
--- /dev/null
+++ b/test/Transforms/Inline/AArch64/inline-target-attr.ll
@@ -0,0 +1,40 @@
+; RUN: opt < %s -mtriple=aarch64-unknown-linux-gnu -S -inline | FileCheck %s
+; RUN: opt < %s -mtriple=aarch64-unknown-linux-gnu -S -passes='cgscc(inline)' | FileCheck %s
+; Check that we only inline when we have compatible target attributes.
+
+define i32 @foo() #0 {
+entry:
+ %call = call i32 (...) @baz()
+ ret i32 %call
+; CHECK-LABEL: foo
+; CHECK: call i32 (...) @baz()
+}
+declare i32 @baz(...) #0
+
+define i32 @bar() #1 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: bar
+; CHECK: call i32 (...) @baz()
+}
+
+define i32 @qux() #0 {
+entry:
+ %call = call i32 @bar()
+ ret i32 %call
+; CHECK-LABEL: qux
+; CHECK: call i32 @bar()
+}
+
+define i32 @strict_align() #2 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: strict_align
+; CHECK: call i32 (...) @baz()
+}
+
+attributes #0 = { "target-cpu"="generic" "target-features"="+crc,+neon" }
+attributes #1 = { "target-cpu"="generic" "target-features"="+crc,+neon,+crypto" }
+attributes #2 = { "target-cpu"="generic" "target-features"="+crc,+neon,+strict-align" }
diff --git a/test/Transforms/Inline/inline-cold-callsite-pgo.ll b/test/Transforms/Inline/inline-cold-callsite-pgo.ll
new file mode 100644
index 0000000000000..26ea8e50eaf1e
--- /dev/null
+++ b/test/Transforms/Inline/inline-cold-callsite-pgo.ll
@@ -0,0 +1,54 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=100 -inline-cold-callsite-threshold=0 -S | FileCheck %s
+
+; This tests that a cold callsite gets the inline-cold-callsite-threshold
+; and does not get inlined. Another callsite to an identical callee that
+; is not cold gets inlined because cost is below the inline-threshold.
+
+define i32 @callee1(i32 %x) !prof !21 {
+ %x1 = add i32 %x, 1
+ %x2 = add i32 %x1, 1
+ %x3 = add i32 %x2, 1
+ call void @extern()
+ ret i32 %x3
+}
+
+define i32 @caller(i32 %n) !prof !22 {
+; CHECK-LABEL: @caller(
+ %cond = icmp sle i32 %n, 100
+ br i1 %cond, label %cond_true, label %cond_false, !prof !0
+
+cond_true:
+; CHECK-LABEL: cond_true:
+; CHECK-NOT: call i32 @callee1
+; CHECK: ret i32 %x3.i
+ %i = call i32 @callee1(i32 %n)
+ ret i32 %i
+cond_false:
+; CHECK-LABEL: cond_false:
+; CHECK: call i32 @callee1
+; CHECK: ret i32 %j
+ %j = call i32 @callee1(i32 %n)
+ ret i32 %j
+}
+declare void @extern()
+
+!0 = !{!"branch_weights", i32 200, i32 1}
+
+!llvm.module.flags = !{!1}
+!21 = !{!"function_entry_count", i64 200}
+!22 = !{!"function_entry_count", i64 200}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 1000}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 1000, i32 1}
+!13 = !{i32 999000, i64 1000, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
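Rough arithmetic behind this test, as a sketch of how the thresholds interact (assuming the inliner derives callsite counts by scaling the entry count with branch probabilities): an entry count of 200 with branch_weights 200:1 gives the cond_true callsite about 200 * 200/201 ≈ 199 and the cond_false callsite about 200 * 1/201 ≈ 1. Against the DetailedSummary, the cutoff at percentile 999999 corresponds to a count of 1, so only the cond_false callsite is classified cold and receives the zero -inline-cold-callsite-threshold, while cond_true is costed against the ordinary threshold of 100 and gets inlined.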
diff --git a/test/Transforms/Inline/inline-cold-callsite.ll b/test/Transforms/Inline/inline-cold-callsite.ll
index 26ea8e50eaf1e..50dd55d62edbe 100644
--- a/test/Transforms/Inline/inline-cold-callsite.ll
+++ b/test/Transforms/Inline/inline-cold-callsite.ll
@@ -1,54 +1,47 @@
+
; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=100 -inline-cold-callsite-threshold=0 -S | FileCheck %s
; This tests that a cold callsite gets the inline-cold-callsite-threshold
; and does not get inlined. Another callsite to an identical callee that
; is not cold gets inlined because cost is below the inline-threshold.
-define i32 @callee1(i32 %x) !prof !21 {
- %x1 = add i32 %x, 1
- %x2 = add i32 %x1, 1
- %x3 = add i32 %x2, 1
+define void @callee() {
+ call void @extern()
call void @extern()
- ret i32 %x3
+ ret void
}
-define i32 @caller(i32 %n) !prof !22 {
-; CHECK-LABEL: @caller(
- %cond = icmp sle i32 %n, 100
- br i1 %cond, label %cond_true, label %cond_false, !prof !0
-
-cond_true:
-; CHECK-LABEL: cond_true:
-; CHECK-NOT: call i32 @callee1
-; CHECK: ret i32 %x3.i
- %i = call i32 @callee1(i32 %n)
- ret i32 %i
-cond_false:
-; CHECK-LABEL: cond_false:
-; CHECK: call i32 @callee1
-; CHECK: ret i32 %j
- %j = call i32 @callee1(i32 %n)
- ret i32 %j
-}
declare void @extern()
+declare i1 @ext(i32)
+
+; CHECK-LABEL: caller
+define i32 @caller(i32 %n) {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret i32 0
+
+for.body:
+ %i.05 = phi i32 [ %inc, %for.inc ], [ 0, %entry ]
+; CHECK: %call = tail call
+ %call = tail call zeroext i1 @ext(i32 %i.05)
+; CHECK-NOT: call void @callee
+; CHECK-NEXT: call void @extern
+ call void @callee()
+ br i1 %call, label %cold, label %for.inc, !prof !0
+
+cold:
+; CHECK: call void @callee
+ call void @callee()
+ br label %for.inc
+
+for.inc:
+ %inc = add nuw nsw i32 %i.05, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
-!0 = !{!"branch_weights", i32 200, i32 1}
-
-!llvm.module.flags = !{!1}
-!21 = !{!"function_entry_count", i64 200}
-!22 = !{!"function_entry_count", i64 200}
-
-!1 = !{i32 1, !"ProfileSummary", !2}
-!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
-!3 = !{!"ProfileFormat", !"InstrProf"}
-!4 = !{!"TotalCount", i64 10000}
-!5 = !{!"MaxCount", i64 1000}
-!6 = !{!"MaxInternalCount", i64 1}
-!7 = !{!"MaxFunctionCount", i64 1000}
-!8 = !{!"NumCounts", i64 3}
-!9 = !{!"NumFunctions", i64 3}
-!10 = !{!"DetailedSummary", !11}
-!11 = !{!12, !13, !14}
-!12 = !{i32 10000, i64 1000, i32 1}
-!13 = !{i32 999000, i64 1000, i32 1}
-!14 = !{i32 999999, i64 1, i32 2}
+!0 = !{!"branch_weights", i32 1, i32 2000}
diff --git a/test/Transforms/Inline/optimization-remarks-yaml.ll b/test/Transforms/Inline/optimization-remarks-yaml.ll
index 532e443e2170b..16783634484fb 100644
--- a/test/Transforms/Inline/optimization-remarks-yaml.ll
+++ b/test/Transforms/Inline/optimization-remarks-yaml.ll
@@ -1,8 +1,21 @@
-; RUN: opt < %s -S -inline -pass-remarks-missed=inline -pass-remarks-with-hotness \
+; RUN: opt < %s -S -inline -pass-remarks-missed=inline \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold 15 \
; RUN: -pass-remarks-output=%t 2>&1 | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=YAML %s
; RUN: opt < %s -S -inline -pass-remarks-with-hotness -pass-remarks-output=%t
; RUN: cat %t | FileCheck -check-prefix=YAML %s
+;
+; Verify that remarks that don't meet the hotness threshold are not output.
+; RUN: opt < %s -S -inline -pass-remarks-missed=inline \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold 100 \
+; RUN: -pass-remarks-output=%t.threshold 2>&1 | \
+; RUN: FileCheck -check-prefix=THRESHOLD %s
+; RUN: test ! -s %t.threshold
+; RUN: opt < %s -S -inline \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold 100 \
+; RUN: -pass-remarks-output=%t.threshold
+; The remarks output file should be empty.
+; RUN: test ! -s %t.threshold
; Check the YAML file generated for inliner remarks for this program:
;
@@ -43,6 +56,9 @@
; YAML-NEXT: - String: ' because its definition is unavailable'
; YAML-NEXT: ...
+; No remarks should be output, since none meet the threshold.
+; THRESHOLD-NOT: remark
+
; ModuleID = '/tmp/s.c'
source_filename = "/tmp/s.c"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Transforms/Inline/pr33637.ll b/test/Transforms/Inline/pr33637.ll
new file mode 100644
index 0000000000000..315feca27bd90
--- /dev/null
+++ b/test/Transforms/Inline/pr33637.ll
@@ -0,0 +1,25 @@
+; RUN: opt -inline < %s
+
+define void @patatino() {
+for.cond:
+ br label %for.body
+
+for.body:
+ %tobool = icmp eq i32 5, 0
+ %sel = select i1 %tobool, i32 0, i32 2
+ br i1 undef, label %cleanup1.thread, label %cleanup1
+
+cleanup1.thread:
+ ret void
+
+cleanup1:
+ %cleanup.dest2 = phi i32 [ %sel, %for.body ]
+ %switch = icmp ult i32 %cleanup.dest2, 1
+ ret void
+}
+
+define void @main() {
+entry:
+ call void @patatino()
+ ret void
+}
diff --git a/test/Transforms/InstCombine/and-or-not.ll b/test/Transforms/InstCombine/and-or-not.ll
index 28881668ca899..1baecb4a13a3b 100644
--- a/test/Transforms/InstCombine/and-or-not.ll
+++ b/test/Transforms/InstCombine/and-or-not.ll
@@ -370,7 +370,7 @@ define i32 @xor_to_xor6(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xor6(
; CHECK-NEXT: [[A:%.*]] = fptosi float %fa to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float %fb to i32
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -408,7 +408,7 @@ define i32 @xor_to_xor8(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xor8(
; CHECK-NEXT: [[A:%.*]] = fptosi float %fa to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float %fb to i32
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -446,7 +446,7 @@ define i32 @xor_to_xor10(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xor10(
; CHECK-NEXT: [[A:%.*]] = fptosi float %fa to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float %fb to i32
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -484,7 +484,7 @@ define i32 @xor_to_xor12(float %fa, float %fb) {
; CHECK-LABEL: @xor_to_xor12(
; CHECK-NEXT: [[A:%.*]] = fptosi float %fa to i32
; CHECK-NEXT: [[B:%.*]] = fptosi float %fb to i32
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[XOR]]
;
%a = fptosi float %fa to i32
@@ -518,7 +518,7 @@ define i64 @PR32830(i64 %a, i64 %b, i64 %c) {
}
; (~a | b) & (~b | a) --> ~(a ^ b)
-; TODO: this increases instrunction count if the pieces have additional users
+; TODO: this increases instruction count if the pieces have additional users
define i32 @and_to_nxor_multiuse(float %fa, float %fb) {
; CHECK-LABEL: @and_to_nxor_multiuse(
; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
@@ -545,7 +545,7 @@ define i32 @and_to_nxor_multiuse(float %fa, float %fb) {
}
; (a & b) | ~(a | b) --> ~(a ^ b)
-; TODO: this increases instrunction count if the pieces have additional users
+; TODO: this increases instruction count if the pieces have additional users
define i32 @or_to_nxor_multiuse(i32 %a, i32 %b) {
; CHECK-LABEL: @or_to_nxor_multiuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
@@ -564,3 +564,87 @@ define i32 @or_to_nxor_multiuse(i32 %a, i32 %b) {
%mul2 = mul i32 %mul1, %or2
ret i32 %mul2
}
+
+; (a | b) ^ (~a | ~b) --> ~(a ^ b)
+define i32 @xor_to_xnor1(float %fa, float %fb) {
+; CHECK-LABEL: @xor_to_xnor1(
+; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
+; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %a = fptosi float %fa to i32
+ %b = fptosi float %fb to i32
+ %nota = xor i32 %a, -1
+ %notb = xor i32 %b, -1
+ %or1 = or i32 %a, %b
+ %or2 = or i32 %nota, %notb
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
+
+; (a | b) ^ (~b | ~a) --> ~(a ^ b)
+define i32 @xor_to_xnor2(float %fa, float %fb) {
+; CHECK-LABEL: @xor_to_xnor2(
+; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
+; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[B]], [[A]]
+; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %a = fptosi float %fa to i32
+ %b = fptosi float %fb to i32
+ %nota = xor i32 %a, -1
+ %notb = xor i32 %b, -1
+ %or1 = or i32 %a, %b
+ %or2 = or i32 %notb, %nota
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
+
+; (~a | ~b) ^ (a | b) --> ~(a ^ b)
+define i32 @xor_to_xnor3(float %fa, float %fb) {
+; CHECK-LABEL: @xor_to_xnor3(
+; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
+; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
+; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[B]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %a = fptosi float %fa to i32
+ %b = fptosi float %fb to i32
+ %nota = xor i32 %a, -1
+ %notb = xor i32 %b, -1
+ %or1 = or i32 %nota, %notb
+ %or2 = or i32 %a, %b
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
+
+; (~a | ~b) ^ (b | a) --> ~(a ^ b)
+define i32 @xor_to_xnor4(float %fa, float %fb) {
+; CHECK-LABEL: @xor_to_xnor4(
+; CHECK-NEXT: [[A:%.*]] = fptosi float [[FA:%.*]] to i32
+; CHECK-NEXT: [[B:%.*]] = fptosi float [[FB:%.*]] to i32
+; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %a = fptosi float %fa to i32
+ %b = fptosi float %fb to i32
+ %nota = xor i32 %a, -1
+ %notb = xor i32 %b, -1
+ %or1 = or i32 %nota, %notb
+ %or2 = or i32 %b, %a
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
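All four xor_to_xnor tests encode the same Boolean identity; a short derivation (plain algebra, not from the test file):

  (a | b) ^ (~a | ~b)
    = (a | b) ^ ~(a & b)      by De Morgan
    = ~((a | b) ^ (a & b))    since x ^ ~y = ~(x ^ y)
    = ~(a ^ b)                since (a | b) ^ (a & b) = a ^ b

Note that the CHECK lines above still show the or/and plus a separate xor with -1; the arrow comments record the intended fully folded form.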
diff --git a/test/Transforms/InstCombine/clamp-to-minmax.ll b/test/Transforms/InstCombine/clamp-to-minmax.ll
new file mode 100644
index 0000000000000..b8cab29d5937f
--- /dev/null
+++ b/test/Transforms/InstCombine/clamp-to-minmax.ll
@@ -0,0 +1,500 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; (X < C1) ? C1 : MIN(X, C2)
+define float @clamp_float_fast_ordered_strict_maxmin(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_ordered_strict_maxmin(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast olt float %x, 255.0
+ %min = select i1 %cmp2, float %x, float 255.0
+ %cmp1 = fcmp fast olt float %x, 1.0
+ %r = select i1 %cmp1, float 1.0, float %min
+ ret float %r
+}
+
+; (X <= C1) ? C1 : MIN(X, C2)
+define float @clamp_float_fast_ordered_nonstrict_maxmin(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_ordered_nonstrict_maxmin(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ole float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast olt float %x, 255.0
+ %min = select i1 %cmp2, float %x, float 255.0
+ %cmp1 = fcmp fast ole float %x, 1.0
+ %r = select i1 %cmp1, float 1.0, float %min
+ ret float %r
+}
+
+; (X > C1) ? C1 : MAX(X, C2)
+define float @clamp_float_fast_ordered_strict_minmax(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_ordered_strict_minmax(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast ogt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ogt float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast ogt float %x, 1.0
+ %max = select i1 %cmp2, float %x, float 1.0
+ %cmp1 = fcmp fast ogt float %x, 255.0
+ %r = select i1 %cmp1, float 255.0, float %max
+ ret float %r
+}
+
+; (X >= C1) ? C1 : MAX(X, C2)
+define float @clamp_float_fast_ordered_nonstrict_minmax(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_ordered_nonstrict_minmax(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast ogt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast oge float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast ogt float %x, 1.0
+ %max = select i1 %cmp2, float %x, float 1.0
+ %cmp1 = fcmp fast oge float %x, 255.0
+ %r = select i1 %cmp1, float 255.0, float %max
+ ret float %r
+}
+
+
+; The same for unordered
+
+; (X < C1) ? C1 : MIN(X, C2)
+define float @clamp_float_fast_unordered_strict_maxmin(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_unordered_strict_maxmin(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp fast oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ult float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast ult float %x, 255.0
+ %min = select i1 %cmp2, float %x, float 255.0
+ %cmp1 = fcmp fast ult float %x, 1.0
+ %r = select i1 %cmp1, float 1.0, float %min
+ ret float %r
+}
+
+; (X <= C1) ? C1 : MIN(X, C2)
+define float @clamp_float_fast_unordered_nonstrict_maxmin(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_unordered_nonstrict_maxmin(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp fast oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ule float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast ult float %x, 255.0
+ %min = select i1 %cmp2, float %x, float 255.0
+ %cmp1 = fcmp fast ule float %x, 1.0
+ %r = select i1 %cmp1, float 1.0, float %min
+ ret float %r
+}
+
+; (X > C1) ? C1 : MAX(X, C2)
+define float @clamp_float_fast_unordered_strict_minmax(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_unordered_strict_minmax(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp fast ole float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2_INV]], float 1.000000e+00, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ugt float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast ugt float %x, 1.0
+ %max = select i1 %cmp2, float %x, float 1.0
+ %cmp1 = fcmp fast ugt float %x, 255.0
+ %r = select i1 %cmp1, float 255.0, float %max
+ ret float %r
+}
+
+; (X >= C1) ? C1 : MAX(X, C2)
+define float @clamp_float_fast_unordered_nonstrict_minmax(float %x) {
+;
+; CHECK-LABEL: @clamp_float_fast_unordered_nonstrict_minmax(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp fast ole float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2_INV]], float 1.000000e+00, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast uge float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast ugt float %x, 1.0
+ %max = select i1 %cmp2, float %x, float 1.0
+ %cmp1 = fcmp fast uge float %x, 255.0
+ %r = select i1 %cmp1, float 255.0, float %max
+ ret float %r
+}
+
+; Some more checks with fast
+
+; (X > 1.0) ? min(x, 255.0) : 1.0
+define float @clamp_test_1(float %x) {
+; CHECK-LABEL: @clamp_test_1(
+; CHECK-NEXT: [[INNER_CMP_INV:%.*]] = fcmp fast oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[INNER_SEL:%.*]] = select i1 [[INNER_CMP_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[OUTER_CMP:%.*]] = fcmp fast ugt float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[OUTER_CMP]], float [[INNER_SEL]], float 1.000000e+00
+; CHECK-NEXT: ret float [[R]]
+;
+ %inner_cmp = fcmp fast ult float %x, 255.0
+ %inner_sel = select i1 %inner_cmp, float %x, float 255.0
+ %outer_cmp = fcmp fast ugt float %x, 1.0
+ %r = select i1 %outer_cmp, float %inner_sel, float 1.0
+ ret float %r
+}
+
+; And some negative cases
+
+; Like @clamp_test_1 but HighConst < LowConst
+define float @clamp_negative_wrong_const(float %x) {
+; CHECK-LABEL: @clamp_negative_wrong_const(
+; CHECK-NEXT: [[INNER_CMP_INV:%.*]] = fcmp fast oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[INNER_SEL:%.*]] = select i1 [[INNER_CMP_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[OUTER_CMP:%.*]] = fcmp fast ugt float [[X]], 5.120000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[OUTER_CMP]], float [[INNER_SEL]], float 5.120000e+02
+; CHECK-NEXT: ret float [[R]]
+;
+ %inner_cmp = fcmp fast ult float %x, 255.0
+ %inner_sel = select i1 %inner_cmp, float %x, float 255.0
+ %outer_cmp = fcmp fast ugt float %x, 512.0
+ %r = select i1 %outer_cmp, float %inner_sel, float 512.0
+ ret float %r
+}
+
+; Like @clamp_test_1 but both are min
+define float @clamp_negative_same_op(float %x) {
+; CHECK-LABEL: @clamp_negative_same_op(
+; CHECK-NEXT: [[INNER_CMP_INV:%.*]] = fcmp fast oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[INNER_SEL:%.*]] = select i1 [[INNER_CMP_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[OUTER_CMP:%.*]] = fcmp fast ult float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[OUTER_CMP]], float [[INNER_SEL]], float 1.000000e+00
+; CHECK-NEXT: ret float [[R]]
+;
+ %inner_cmp = fcmp fast ult float %x, 255.0
+ %inner_sel = select i1 %inner_cmp, float %x, float 255.0
+ %outer_cmp = fcmp fast ult float %x, 1.0
+ %r = select i1 %outer_cmp, float %inner_sel, float 1.0
+ ret float %r
+}
+
+
+; And now without fast.
+
+; First, check that we don't do bad things in the presence of signed zeros
+define float @clamp_float_with_zero1(float %x) {
+; CHECK-LABEL: @clamp_float_with_zero1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ole float [[X]], 0.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 0.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast olt float %x, 255.0
+ %min = select i1 %cmp2, float %x, float 255.0
+ %cmp1 = fcmp ole float %x, 0.0
+ %r = select i1 %cmp1, float 0.0, float %min
+ ret float %r
+}
+
+define float @clamp_float_with_zero2(float %x) {
+; CHECK-LABEL: @clamp_float_with_zero2(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[X]], 0.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 0.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp fast olt float %x, 255.0
+ %min = select i1 %cmp2, float %x, float 255.0
+ %cmp1 = fcmp olt float %x, 0.0
+ %r = select i1 %cmp1, float 0.0, float %min
+ ret float %r
+}
+
+; Also, here we care more about the ordering of the inner min/max, so
+; there are twice as many cases.
+; TODO: this is not implemented yet, so these checks are for the
+; future. For now, the checks below simply verify that
+; "fcmp.*%x" appears twice for each label.
+
+; (X < C1) ? C1 : MIN(X, C2)
+define float @clamp_float_ordered_strict_maxmin1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_strict_maxmin1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp olt float %x, 255.0 ; X is NaN => false
+ %min = select i1 %cmp2, float %x, float 255.0 ; 255.0
+ %cmp1 = fcmp olt float %x, 1.0 ; false
+ %r = select i1 %cmp1, float 1.0, float %min ; min (255.0)
+ ret float %r
+}
+
+define float @clamp_float_ordered_strict_maxmin2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_strict_maxmin2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ult float %x, 255.0 ; X is NaN => true
+ %min = select i1 %cmp2, float %x, float 255.0 ; NaN
+ %cmp1 = fcmp olt float %x, 1.0 ; false
+ %r = select i1 %cmp1, float 1.0, float %min ; min (NaN)
+ ret float %r
+}
+
+; (X <= C1) ? C1 : MIN(X, C2)
+define float @clamp_float_ordered_nonstrict_maxmin1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_nonstrict_maxmin1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ole float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp olt float %x, 255.0 ; X is NaN => false
+ %min = select i1 %cmp2, float %x, float 255.0 ; 255.0
+ %cmp1 = fcmp ole float %x, 1.0 ; false
+ %r = select i1 %cmp1, float 1.0, float %min ; min (255.0)
+ ret float %r
+}
+
+define float @clamp_float_ordered_nonstrict_maxmin2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_nonstrict_maxmin2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ole float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ult float %x, 255.0 ; x is NaN => true
+ %min = select i1 %cmp2, float %x, float 255.0 ; NaN
+ %cmp1 = fcmp ole float %x, 1.0 ; false
+ %r = select i1 %cmp1, float 1.0, float %min ; min (NaN)
+ ret float %r
+}
+
+; (X > C1) ? C1 : MAX(X, C2)
+define float @clamp_float_ordered_strict_minmax1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_strict_minmax1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ogt float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ogt float %x, 1.0 ; x is NaN => false
+ %max = select i1 %cmp2, float %x, float 1.0 ; 1.0
+ %cmp1 = fcmp ogt float %x, 255.0 ; false
+ %r = select i1 %cmp1, float 255.0, float %max ; max (1.0)
+ ret float %r
+}
+
+define float @clamp_float_ordered_strict_minmax2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_strict_minmax2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp ole float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2_INV]], float 1.000000e+00, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ogt float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ugt float %x, 1.0 ; x is NaN => true
+ %max = select i1 %cmp2, float %x, float 1.0 ; NaN
+ %cmp1 = fcmp ogt float %x, 255.0 ; false
+ %r = select i1 %cmp1, float 255.0, float %max ; max (NaN)
+ ret float %r
+}
+
+; (X >= C1) ? C1 : MAX(X, C2)
+define float @clamp_float_ordered_nonstrict_minmax1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_nonstrict_minmax1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp oge float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ogt float %x, 1.0 ; x is NaN => false
+ %max = select i1 %cmp2, float %x, float 1.0 ; 1.0
+ %cmp1 = fcmp oge float %x, 255.0 ; false
+ %r = select i1 %cmp1, float 255.0, float %max ; max (1.0)
+ ret float %r
+}
+
+define float @clamp_float_ordered_nonstrict_minmax2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_ordered_nonstrict_minmax2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp ole float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2_INV]], float 1.000000e+00, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp oge float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ugt float %x, 1.0 ; x is NaN => true
+ %max = select i1 %cmp2, float %x, float 1.0 ; NaN
+ %cmp1 = fcmp oge float %x, 255.0 ; false
+ %r = select i1 %cmp1, float 255.0, float %max ; max (NaN)
+ ret float %r
+}
+
+
+; The same for unordered
+
+; (X < C1) ? C1 : MIN(X, C2)
+define float @clamp_float_unordered_strict_maxmin1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_strict_maxmin1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ult float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp olt float %x, 255.0 ; x is NaN => false
+ %min = select i1 %cmp2, float %x, float 255.0 ; 255.0
+ %cmp1 = fcmp ult float %x, 1.0 ; true
+ %r = select i1 %cmp1, float 1.0, float %min ; 1.0
+ ret float %r
+}
+
+define float @clamp_float_unordered_strict_maxmin2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_strict_maxmin2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ult float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ult float %x, 255.0 ; x is NaN => true
+ %min = select i1 %cmp2, float %x, float 255.0 ; NaN
+ %cmp1 = fcmp ult float %x, 1.0 ; true
+ %r = select i1 %cmp1, float 1.0, float %min ; 1.0
+ ret float %r
+}
+
+; (X <= C1) ? C1 : MIN(X, C2)
+define float @clamp_float_unordered_nonstrict_maxmin1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_nonstrict_maxmin1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp olt float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], float [[X]], float 2.550000e+02
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ule float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp olt float %x, 255.0 ; x is NaN => false
+ %min = select i1 %cmp2, float %x, float 255.0 ; 255.0
+ %cmp1 = fcmp ule float %x, 1.0 ; true
+ %r = select i1 %cmp1, float 1.0, float %min ; 1.0
+ ret float %r
+}
+
+define float @clamp_float_unordered_nonstrict_maxmin2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_nonstrict_maxmin2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp oge float [[X:%.*]], 2.550000e+02
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2_INV]], float 2.550000e+02, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ule float [[X]], 1.000000e+00
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 1.000000e+00, float [[MIN]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ult float %x, 255.0 ; x is NaN => true
+ %min = select i1 %cmp2, float %x, float 255.0 ; NaN
+ %cmp1 = fcmp ule float %x, 1.0 ; true
+ %r = select i1 %cmp1, float 1.0, float %min ; 1.0
+ ret float %r
+}
+
+; (X > C1) ? C1 : MAX(X, C2)
+define float @clamp_float_unordered_strict_minmax1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_strict_minmax1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ugt float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ogt float %x, 1.0 ; x is NaN => false
+ %max = select i1 %cmp2, float %x, float 1.0 ; 1.0
+ %cmp1 = fcmp ugt float %x, 255.0 ; true
+ %r = select i1 %cmp1, float 255.0, float %max ; 255.0
+ ret float %r
+}
+
+define float @clamp_float_unordered_strict_minmax2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_strict_minmax2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp ole float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2_INV]], float 1.000000e+00, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ugt float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ugt float %x, 1.0 ; x is NaN => true
+ %max = select i1 %cmp2, float %x, float 1.0 ; NaN
+ %cmp1 = fcmp ugt float %x, 255.0 ; true
+ %r = select i1 %cmp1, float 255.0, float %max ; 255.0
+ ret float %r
+}
+
+; (X >= C1) ? C1 : MAX(X, C2)
+define float @clamp_float_unordered_nonstrict_minmax1(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_nonstrict_minmax1(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp uge float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ogt float %x, 1.0 ; x is NaN => false
+ %max = select i1 %cmp2, float %x, float 1.0 ; 1.0
+ %cmp1 = fcmp uge float %x, 255.0 ; true
+ %r = select i1 %cmp1, float 255.0, float %max ; 255.0
+ ret float %r
+}
+
+define float @clamp_float_unordered_nonstrict_minmax2(float %x) {
+;
+; CHECK-LABEL: @clamp_float_unordered_nonstrict_minmax2(
+; CHECK-NEXT: [[CMP2_INV:%.*]] = fcmp ole float [[X:%.*]], 1.000000e+00
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2_INV]], float 1.000000e+00, float [[X]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp uge float [[X]], 2.550000e+02
+; CHECK-NEXT: [[R:%.*]] = select i1 [[CMP1]], float 2.550000e+02, float [[MAX]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %cmp2 = fcmp ugt float %x, 1.0 ; x is NaN => true
+ %max = select i1 %cmp2, float %x, float 1.0 ; NaN
+ %cmp1 = fcmp uge float %x, 255.0 ; true
+ %r = select i1 %cmp1, float 255.0, float %max ; 255.0
+ ret float %r
+}
diff --git a/test/Transforms/InstCombine/extractinsert-tbaa.ll b/test/Transforms/InstCombine/extractinsert-tbaa.ll
new file mode 100644
index 0000000000000..b2a3a1a1bf9ba
--- /dev/null
+++ b/test/Transforms/InstCombine/extractinsert-tbaa.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -instcombine %s -o - | FileCheck %s
+
+%Complex = type { double, double }
+
+; Check that instcombine preserves TBAA when narrowing loads
+define double @teststructextract(%Complex *%val) {
+; CHECK: load double, {{.*}}, !tbaa
+; CHECK-NOT: load %Complex
+ %loaded = load %Complex, %Complex *%val, !tbaa !1
+ %real = extractvalue %Complex %loaded, 0
+ ret double %real
+}
+
+define double @testarrayextract([2 x double] *%val) {
+; CHECK: load double, {{.*}}, !tbaa
+; CHECK-NOT: load [2 x double]
+ %loaded = load [2 x double], [2 x double] *%val, !tbaa !1
+ %real = extractvalue [2 x double] %loaded, 0
+ ret double %real
+}
+
+; Check that instcombine preserves TBAA when breaking up stores
+define void @teststructinsert(%Complex *%loc, double %a, double %b) {
+; CHECK: store double %a, {{.*}}, !tbaa
+; CHECK: store double %b, {{.*}}, !tbaa
+; CHECK-NOT: store %Complex
+ %inserted = insertvalue %Complex undef, double %a, 0
+ %inserted2 = insertvalue %Complex %inserted, double %b, 1
+ store %Complex %inserted2, %Complex *%loc, !tbaa !1
+ ret void
+}
+
+define void @testarrayinsert([2 x double] *%loc, double %a, double %b) {
+; CHECK: store double %a, {{.*}}, !tbaa
+; CHECK: store double %b, {{.*}}, !tbaa
+; CHECK-NOT: store [2 x double]
+ %inserted = insertvalue [2 x double] undef, double %a, 0
+ %inserted2 = insertvalue [2 x double] %inserted, double %b, 1
+ store [2 x double] %inserted2, [2 x double] *%loc, !tbaa !1
+ ret void
+}
+
+!0 = !{!"tbaa_root"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"Complex", !0, i64 0}
diff --git a/test/Transforms/InstCombine/ffs-1.ll b/test/Transforms/InstCombine/ffs-1.ll
index af4ee85216ef2..5dcdae1084458 100644
--- a/test/Transforms/InstCombine/ffs-1.ll
+++ b/test/Transforms/InstCombine/ffs-1.ll
@@ -150,8 +150,8 @@ define i32 @test_simplify13(i32 %x) {
; ALL-LABEL: @test_simplify13(
; ALL-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 %x, i1 true)
; ALL-NEXT: [[TMP1:%.*]] = add nuw nsw i32 [[CTTZ]], 1
-; ALL-NEXT: [[TMP2:%.*]] = icmp ne i32 %x, 0
-; ALL-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 0
+; ALL-NEXT: [[TMP2:%.*]] = icmp eq i32 %x, 0
+; ALL-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 0, i32 [[TMP1]]
; ALL-NEXT: ret i32 [[TMP3]]
;
%ret = call i32 @ffs(i32 %x)
@@ -166,8 +166,8 @@ define i32 @test_simplify14(i32 %x) {
; TARGET-LABEL: @test_simplify14(
; TARGET-NEXT: [[CTTZ:%.*]] = call i32 @llvm.cttz.i32(i32 %x, i1 true)
; TARGET-NEXT: [[TMP1:%.*]] = add nuw nsw i32 [[CTTZ]], 1
-; TARGET-NEXT: [[TMP2:%.*]] = icmp ne i32 %x, 0
-; TARGET-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 0
+; TARGET-NEXT: [[TMP2:%.*]] = icmp eq i32 %x, 0
+; TARGET-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 0, i32 [[TMP1]]
; TARGET-NEXT: ret i32 [[TMP3]]
;
%ret = call i32 @ffsl(i32 %x)
@@ -183,8 +183,8 @@ define i32 @test_simplify15(i64 %x) {
; TARGET-NEXT: [[CTTZ:%.*]] = call i64 @llvm.cttz.i64(i64 %x, i1 true)
; TARGET-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[CTTZ]], 1
; TARGET-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
-; TARGET-NEXT: [[TMP3:%.*]] = icmp ne i64 %x, 0
-; TARGET-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 0
+; TARGET-NEXT: [[TMP3:%.*]] = icmp eq i64 %x, 0
+; TARGET-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
; TARGET-NEXT: ret i32 [[TMP4]]
;
%ret = call i32 @ffsll(i64 %x)
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index ed570da73c9ef..127fde10e9f7b 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -762,6 +762,22 @@ define i1 @test52(i32 %x1) {
ret i1 %A
}
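+; Same combine as test52 above, but at i128: the two byte tests merge into a
+; single masked compare. 16711935 = 0xFF00FF and 4980863 = 0x4C007F, i.e.
+; (76 << 16) | 127.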
+define i1 @test52b(i128 %x1) {
+; CHECK-LABEL: @test52b(
+; CHECK-NEXT: [[TMP1:%.*]] = and i128 [[X1:%.*]], 16711935
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i128 [[TMP1]], 4980863
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %conv = and i128 %x1, 255
+ %cmp = icmp eq i128 %conv, 127
+ %tmp2 = lshr i128 %x1, 16
+ %tmp3 = trunc i128 %tmp2 to i8
+ %cmp15 = icmp eq i8 %tmp3, 76
+
+ %A = and i1 %cmp, %cmp15
+ ret i1 %A
+}
+
; PR9838
define i1 @test53(i32 %a, i32 %b) {
; CHECK-LABEL: @test53(
@@ -2423,8 +2439,8 @@ define i32 @f7(i32 %a, i32 %b) {
; CHECK-LABEL: @f7(
; CHECK-NEXT: [[CMP_UNSHIFTED:%.*]] = xor i32 %a, %b
; CHECK-NEXT: [[CMP_MASK:%.*]] = and i32 [[CMP_UNSHIFTED]], 511
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[CMP:%.*]].mask, 0
-; CHECK-NEXT: [[S:%.*]] = select i1 [[CMP]], i32 10000, i32 0
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[CMP_MASK]], 0
+; CHECK-NEXT: [[S:%.*]] = select i1 [[CMP]], i32 0, i32 10000
; CHECK-NEXT: ret i32 [[S]]
;
%sext = shl i32 %a, 23
@@ -2959,3 +2975,63 @@ define <2 x i1> @eq_mul_constants_with_tz_splat(<2 x i32> %x, <2 x i32> %y) {
ret <2 x i1> %C
}
+declare i32 @llvm.bswap.i32(i32)
+
+define i1 @bswap_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @bswap_ne(
+; CHECK-NEXT: [[SWAPX:%.*]] = call i32 @llvm.bswap.i32(i32 %x)
+; CHECK-NEXT: [[SWAPY:%.*]] = call i32 @llvm.bswap.i32(i32 %y)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[SWAPX]], [[SWAPY]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %swapx = call i32 @llvm.bswap.i32(i32 %x)
+ %swapy = call i32 @llvm.bswap.i32(i32 %y)
+ %cmp = icmp ne i32 %swapx, %swapy
+ ret i1 %cmp
+}
+
+declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
+
+define <8 x i1> @bswap_vec_eq(<8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: @bswap_vec_eq(
+; CHECK-NEXT: [[SWAPX:%.*]] = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %x)
+; CHECK-NEXT: [[SWAPY:%.*]] = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %y)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i16> [[SWAPX]], [[SWAPY]]
+; CHECK-NEXT: ret <8 x i1> [[CMP]]
+;
+ %swapx = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %x)
+ %swapy = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %y)
+ %cmp = icmp eq <8 x i16> %swapx, %swapy
+ ret <8 x i1> %cmp
+}
+
+declare i64 @llvm.bitreverse.i64(i64)
+
+define i1 @bitreverse_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @bitreverse_eq(
+; CHECK-NEXT: [[REVX:%.*]] = call i64 @llvm.bitreverse.i64(i64 %x)
+; CHECK-NEXT: [[REVY:%.*]] = call i64 @llvm.bitreverse.i64(i64 %y)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[REVX]], [[REVY]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %revx = call i64 @llvm.bitreverse.i64(i64 %x)
+ %revy = call i64 @llvm.bitreverse.i64(i64 %y)
+ %cmp = icmp eq i64 %revx, %revy
+ ret i1 %cmp
+}
+
+declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
+
+define <8 x i1> @bitreverse_vec_ne(<8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: @bitreverse_vec_ne(
+; CHECK-NEXT: [[REVX:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %x)
+; CHECK-NEXT: [[REVY:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %y)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i16> [[REVX]], [[REVY]]
+; CHECK-NEXT: ret <8 x i1> [[CMP]]
+;
+ %revx = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %x)
+ %revy = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %y)
+ %cmp = icmp ne <8 x i16> %revx, %revy
+ ret <8 x i1> %cmp
+}
+
diff --git a/test/Transforms/InstCombine/logical-select.ll b/test/Transforms/InstCombine/logical-select.ll
index 6c00dec60ed6a..4c0223aa6dd1d 100644
--- a/test/Transforms/InstCombine/logical-select.ll
+++ b/test/Transforms/InstCombine/logical-select.ll
@@ -62,19 +62,15 @@ define i32 @poo(i32 %a, i32 %b, i32 %c, i32 %d) {
ret i32 %t3
}
-; TODO: For the next 4 tests, are there potential canonicalizations and/or folds for these
-; in InstCombine? Independent of that, tests like this that may not show any transforms
-; still have value because they can help identify conflicting canonicalization rules that
-; lead to infinite looping.
-
; PR32791 - https://bugs.llvm.org//show_bug.cgi?id=32791
-; Fold two selects with inverted predicates and zero operands.
+; The 2nd compare/select are canonicalized, so CSE and another round of instcombine or some other pass will fold this.
+
define i32 @fold_inverted_icmp_preds(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @fold_inverted_icmp_preds(
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 %a, %b
; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CMP1]], i32 %c, i32 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 %a, %b
-; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 %d, i32 0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 %a, %b
+; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 0, i32 %d
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEL1]], [[SEL2]]
; CHECK-NEXT: ret i32 [[OR]]
;
@@ -86,12 +82,14 @@ define i32 @fold_inverted_icmp_preds(i32 %a, i32 %b, i32 %c, i32 %d) {
ret i32 %or
}
+; The 2nd compare/select are canonicalized, so CSE and another round of instcombine or some other pass will fold this.
+
define i32 @fold_inverted_icmp_preds_reverse(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @fold_inverted_icmp_preds_reverse(
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 %a, %b
; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[CMP1]], i32 0, i32 %c
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 %a, %b
-; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 0, i32 %d
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 %a, %b
+; CHECK-NEXT: [[SEL2:%.*]] = select i1 [[CMP2]], i32 %d, i32 0
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEL1]], [[SEL2]]
; CHECK-NEXT: ret i32 [[OR]]
;
@@ -103,6 +101,8 @@ define i32 @fold_inverted_icmp_preds_reverse(i32 %a, i32 %b, i32 %c, i32 %d) {
ret i32 %or
}
+; TODO: Should fcmp have the same sort of predicate canonicalization as icmp?
+
define i32 @fold_inverted_fcmp_preds(float %a, float %b, i32 %c, i32 %d) {
; CHECK-LABEL: @fold_inverted_fcmp_preds(
; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float %a, %b
@@ -120,10 +120,12 @@ define i32 @fold_inverted_fcmp_preds(float %a, float %b, i32 %c, i32 %d) {
ret i32 %or
}
+; The 2nd compare/select are canonicalized, so CSE and another round of instcombine or some other pass will fold this.
+
define <2 x i32> @fold_inverted_icmp_vector_preds(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; CHECK-LABEL: @fold_inverted_icmp_vector_preds(
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne <2 x i32> %a, %b
-; CHECK-NEXT: [[SEL1:%.*]] = select <2 x i1> [[CMP1]], <2 x i32> %c, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq <2 x i32> %a, %b
+; CHECK-NEXT: [[SEL1:%.*]] = select <2 x i1> [[CMP1]], <2 x i32> zeroinitializer, <2 x i32> %c
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <2 x i32> %a, %b
; CHECK-NEXT: [[SEL2:%.*]] = select <2 x i1> [[CMP2]], <2 x i32> %d, <2 x i32> zeroinitializer
; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[SEL1]], [[SEL2]]
diff --git a/test/Transforms/InstCombine/max-of-nots.ll b/test/Transforms/InstCombine/max-of-nots.ll
index 519f1c6a90b04..0302c9ec6d798 100644
--- a/test/Transforms/InstCombine/max-of-nots.ll
+++ b/test/Transforms/InstCombine/max-of-nots.ll
@@ -93,14 +93,15 @@ define i32 @max_of_nots(i32 %x, i32 %y) {
; negative test case (i.e. can not simplify) : ABS(MIN(NOT x,y))
define i32 @abs_of_min_of_not(i32 %x, i32 %y) {
; CHECK-LABEL: @abs_of_min_of_not(
-; CHECK-NEXT: xor
-; CHECK-NEXT: add
-; CHECK-NEXT: icmp sge
-; CHECK-NEXT: select
-; CHECK-NEXT: icmp sgt
-; CHECK-NEXT: sub
-; CHECK-NEXT: select
-; CHECK-NEXT: ret
+; CHECK-NEXT: [[XORD:%.*]] = xor i32 %x, -1
+; CHECK-NEXT: [[YADD:%.*]] = add i32 %y, 2
+; CHECK-NEXT: [[COND_I:%.*]] = icmp slt i32 [[YADD]], [[XORD]]
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[COND_I]], i32 [[YADD]], i32 [[XORD]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[MIN]], -1
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[MIN]]
+; CHECK-NEXT: [[ABS:%.*]] = select i1 [[CMP2]], i32 [[MIN]], i32 [[SUB]]
+; CHECK-NEXT: ret i32 [[ABS]]
+;
%xord = xor i32 %x, -1
%yadd = add i32 %y, 2
diff --git a/test/Transforms/InstCombine/memmove.ll b/test/Transforms/InstCombine/memmove.ll
index 96f230eece066..7bc6d9a675d12 100644
--- a/test/Transforms/InstCombine/memmove.ll
+++ b/test/Transforms/InstCombine/memmove.ll
@@ -1,6 +1,6 @@
; This test makes sure that memmove instructions are properly eliminated.
;
-; RUN: opt < %s -instcombine -S | not grep "call void @llvm.memmove"
+; RUN: opt < %s -instcombine -S | FileCheck %s
@S = internal constant [33 x i8] c"panic: restorelist inconsistency\00" ; <[33 x i8]*> [#uses=1]
@h = constant [2 x i8] c"h\00" ; <[2 x i8]*> [#uses=1]
@@ -8,32 +8,46 @@
@hello_u = constant [8 x i8] c"hello_u\00" ; <[8 x i8]*> [#uses=1]
define void @test1(i8* %A, i8* %B, i32 %N) {
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %A, i8* %B, i32 0, i32 1, i1 false)
- ret void
+ ;; CHECK-LABEL: test1
+ ;; CHECK-NEXT: ret void
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %A, i8* %B, i32 0, i32 1, i1 false)
+ ret void
}
define void @test2(i8* %A, i32 %N) {
- ;; dest can't alias source since we can't write to source!
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %A, i8* getelementptr inbounds ([33 x i8], [33 x i8]* @S, i32 0, i32 0), i32 %N, i32 1, i1 false)
- ret void
+ ;; dest can't alias source since we can't write to source!
+ ;; CHECK-LABEL: test2
+ ;; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %A, i8* getelementptr inbounds ([33 x i8], [33 x i8]* @S, i{{32|64}} 0, i{{32|64}} 0), i32 %N, i32 1, i1 false)
+ ;; CHECK-NEXT: ret void
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %A, i8* getelementptr inbounds ([33 x i8], [33 x i8]* @S, i32 0, i32 0), i32 %N, i32 1, i1 false)
+ ret void
}
-define i32 @test3() {
- %h_p = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0 ; <i8*> [#uses=1]
- %hel_p = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0 ; <i8*> [#uses=1]
- %hello_u_p = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0 ; <i8*> [#uses=1]
- %target = alloca [1024 x i8] ; <[1024 x i8]*> [#uses=1]
- %target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=3]
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %h_p, i32 2, i32 2, i1 false)
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hel_p, i32 4, i32 4, i1 false)
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hello_u_p, i32 8, i32 8, i1 false)
- ret i32 0
+define i32 @test3([1024 x i8]* %target) { ; arg: <[1024 x i8]*> [#uses=1]
+ ;; CHECK-LABEL: test3
+ ;; CHECK-NEXT: [[P1:%[^\s]+]] = bitcast [1024 x i8]* %target to i16*
+ ;; CHECK-NEXT: store i16 104, i16* [[P1]], align 2
+ ;; CHECK-NEXT: [[P2:%[^\s]+]] = bitcast [1024 x i8]* %target to i32*
+ ;; CHECK-NEXT: store i32 7103848, i32* [[P2]], align 4
+ ;; CHECK-NEXT: [[P3:%[^\s]+]] = bitcast [1024 x i8]* %target to i64*
+ ;; CHECK-NEXT: store i64 33037504440198504, i64* [[P3]], align 8
+ ;; CHECK-NEXT: ret i32 0
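+  ;; The stored constants are the little-endian byte patterns of the source
+  ;; strings: 104 = "h\00", 7103848 = 0x006C6568 = "hel\00", and
+  ;; 33037504440198504 = 0x00755F6F6C6C6568 = "hello_u\00".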
+ %h_p = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hel_p = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hello_u_p = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0 ; <i8*> [#uses=1]
+ %target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=3]
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %h_p, i32 2, i32 2, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hel_p, i32 4, i32 4, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hello_u_p, i32 8, i32 8, i1 false)
+ ret i32 0
}
; PR2370
define void @test4(i8* %a) {
+ ;; CHECK-LABEL: test4
+ ;; CHECK-NEXT: ret void
tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i32 1, i1 false)
ret void
}
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) argmemonly nounwind
diff --git a/test/Transforms/InstCombine/memset.ll b/test/Transforms/InstCombine/memset.ll
index dfafcf948d1cd..08bbf8ba1ef3e 100644
--- a/test/Transforms/InstCombine/memset.ll
+++ b/test/Transforms/InstCombine/memset.ll
@@ -1,7 +1,16 @@
-; RUN: opt < %s -instcombine -S | not grep "call.*llvm.memset"
+; RUN: opt < %s -instcombine -S | FileCheck %s
-define i32 @main() {
- %target = alloca [1024 x i8]
+define i32 @test([1024 x i8]* %target) {
+ ;; CHECK-LABEL: test
+ ;; CHECK-NEXT: [[P1:%[^\s]+]] = getelementptr inbounds [1024 x i8], [1024 x i8]* %target, i64 0, i64 0
+ ;; CHECK-NEXT: store i8 1, i8* [[P1]], align 1
+ ;; CHECK-NEXT: [[P2:%[^\s]+]] = bitcast [1024 x i8]* %target to i16*
+ ;; CHECK-NEXT: store i16 257, i16* [[P2]], align 2
+ ;; CHECK-NEXT: [[P3:%[^\s]+]] = bitcast [1024 x i8]* %target to i32*
+ ;; CHECK-NEXT: store i32 16843009, i32* [[P3]], align 4
+ ;; CHECK-NEXT: [[P4:%[^\s]+]] = bitcast [1024 x i8]* %target to i64*
+ ;; CHECK-NEXT: store i64 72340172838076673, i64* [[P4]], align 8
+ ;; CHECK-NEXT: ret i32 0
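+  ;; Each memset of byte 1 with lengths 1, 2, 4, and 8 becomes a single store
+  ;; of the replicated byte: 257 = 0x0101, 16843009 = 0x01010101, and
+  ;; 72340172838076673 = 0x0101010101010101; the zero-length call is deleted.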
%target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 0, i32 1, i1 false)
call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 1, i32 1, i1 false)
@@ -11,4 +20,4 @@ define i32 @main() {
ret i32 0
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) argmemonly nounwind
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
index 6e67c7fa08a06..1d9d0a6a9fa0c 100644
--- a/test/Transforms/InstCombine/mul.ll
+++ b/test/Transforms/InstCombine/mul.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; This test makes sure that mul instructions are properly eliminated.
; RUN: opt < %s -instcombine -S | FileCheck %s
@@ -297,6 +298,15 @@ define i32 @test32(i32 %X) {
; CHECK-NEXT: ret i32 %[[shl]]
}
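+; Splat-vector version of test32: multiplying nsw by -2147483648 (bit pattern
+; 1 << 31) is canonicalized to shl nsw by 31.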
+define <2 x i32> @test32vec(<2 x i32> %X) {
+; CHECK-LABEL: @test32vec(
+; CHECK-NEXT: [[MUL:%.*]] = shl nsw <2 x i32> [[X:%.*]], <i32 31, i32 31>
+; CHECK-NEXT: ret <2 x i32> [[MUL]]
+;
+ %mul = mul nsw <2 x i32> %X, <i32 -2147483648, i32 -2147483648>
+ ret <2 x i32> %mul
+}
+
define i32 @test33(i32 %X) {
; CHECK-LABEL: @test33
%mul = mul nsw i32 %X, 1073741824
@@ -304,3 +314,21 @@ define i32 @test33(i32 %X) {
; CHECK-NEXT: ret i32 %[[shl]]
ret i32 %mul
}
+
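+; Splat-vector version of test33: 1073741824 = 1 << 30, so the nsw multiply
+; becomes shl nsw by 30.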
+define <2 x i32> @test33vec(<2 x i32> %X) {
+; CHECK-LABEL: @test33vec(
+; CHECK-NEXT: [[MUL:%.*]] = shl nsw <2 x i32> [[X:%.*]], <i32 30, i32 30>
+; CHECK-NEXT: ret <2 x i32> [[MUL]]
+;
+ %mul = mul nsw <2 x i32> %X, <i32 1073741824, i32 1073741824>
+ ret <2 x i32> %mul
+}
+
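+; The same canonicalization applies at i128: mul nsw by 2 becomes shl nsw by 1.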
+define i128 @test34(i128 %X) {
+; CHECK-LABEL: @test34(
+; CHECK-NEXT: [[MUL:%.*]] = shl nsw i128 [[X:%.*]], 1
+; CHECK-NEXT: ret i128 [[MUL]]
+;
+ %mul = mul nsw i128 %X, 2
+ ret i128 %mul
+}
diff --git a/test/Transforms/InstCombine/or-xor.ll b/test/Transforms/InstCombine/or-xor.ll
index af62c2dd4ba32..2164f0df8d279 100644
--- a/test/Transforms/InstCombine/or-xor.ll
+++ b/test/Transforms/InstCombine/or-xor.ll
@@ -178,7 +178,7 @@ define i32 @test13(i32 %x, i32 %y) {
; ((x | ~y) ^ (~x | y)) -> x ^ y
define i32 @test14(i32 %x, i32 %y) {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 %x, %y
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %y, %x
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -191,7 +191,7 @@ define i32 @test14(i32 %x, i32 %y) {
define i32 @test14_commuted(i32 %x, i32 %y) {
; CHECK-LABEL: @test14_commuted(
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 %x, %y
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %y, %x
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -205,7 +205,7 @@ define i32 @test14_commuted(i32 %x, i32 %y) {
; ((x & ~y) ^ (~x & y)) -> x ^ y
define i32 @test15(i32 %x, i32 %y) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 %x, %y
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %y, %x
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -218,7 +218,7 @@ define i32 @test15(i32 %x, i32 %y) {
define i32 @test15_commuted(i32 %x, i32 %y) {
; CHECK-LABEL: @test15_commuted(
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 %x, %y
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %y, %x
; CHECK-NEXT: ret i32 [[XOR]]
;
%noty = xor i32 %y, -1
@@ -344,3 +344,71 @@ define i8 @test18(i8 %A, i8 %B) {
%res = mul i8 %or, %xor2 ; to increase the use count for the xor
ret i8 %res
}
+
+; ((x | y) ^ (~x | ~y)) -> ~(x ^ y)
+define i32 @test19(i32 %x, i32 %y) {
+; CHECK-LABEL: @test19(
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[X]], [[Y]]
+; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %noty = xor i32 %y, -1
+ %notx = xor i32 %x, -1
+ %or1 = or i32 %x, %y
+ %or2 = or i32 %notx, %noty
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
+
+; ((x | y) ^ (~y | ~x)) -> ~(x ^ y)
+define i32 @test20(i32 %x, i32 %y) {
+; CHECK-LABEL: @test20(
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[OR2_DEMORGAN:%.*]] = and i32 [[Y]], [[X]]
+; CHECK-NEXT: [[OR2:%.*]] = xor i32 [[OR2_DEMORGAN]], -1
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR1]], [[OR2]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %noty = xor i32 %y, -1
+ %notx = xor i32 %x, -1
+ %or1 = or i32 %x, %y
+ %or2 = or i32 %noty, %notx
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
+
+; ((~x | ~y) ^ (x | y)) -> ~(x ^ y)
+define i32 @test21(i32 %x, i32 %y) {
+; CHECK-LABEL: @test21(
+; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[X]], [[Y]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %noty = xor i32 %y, -1
+ %notx = xor i32 %x, -1
+ %or1 = or i32 %notx, %noty
+ %or2 = or i32 %x, %y
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
+
+; ((~x | ~y) ^ (y | x)) -> ~(x ^ y)
+define i32 @test22(i32 %x, i32 %y) {
+; CHECK-LABEL: @test22(
+; CHECK-NEXT: [[OR1_DEMORGAN:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = xor i32 [[OR1_DEMORGAN]], -1
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[Y]], [[X]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR2]], [[OR1]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %noty = xor i32 %y, -1
+ %notx = xor i32 %x, -1
+ %or1 = or i32 %notx, %noty
+ %or2 = or i32 %y, %x
+ %xor = xor i32 %or1, %or2
+ ret i32 %xor
+}
diff --git a/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll b/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
index 894bf6db0a428..3ac02795b4786 100644
--- a/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
+++ b/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
@@ -318,10 +318,33 @@ entry:
ret i16 %cond
}
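+; The i128 variants fold like the narrower types above: selecting the bit
+; width (128) when %x is zero matches the intrinsic's own zero behavior, so
+; the compare and select are removed and the i1 flag flips from true (undef
+; at zero) to false (defined at zero).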
+define i128 @test7(i128 %x) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i128 @llvm.ctlz.i128(i128 [[X:%.*]], i1 false), !range !3
+; CHECK-NEXT: ret i128 [[TMP1]]
+;
+ %1 = tail call i128 @llvm.ctlz.i128(i128 %x, i1 true)
+ %tobool = icmp ne i128 %x, 0
+ %cond = select i1 %tobool, i128 %1, i128 128
+ ret i128 %cond
+}
+
+define i128 @test8(i128 %x) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i128 @llvm.cttz.i128(i128 [[X:%.*]], i1 false), !range !3
+; CHECK-NEXT: ret i128 [[TMP1]]
+;
+ %1 = tail call i128 @llvm.cttz.i128(i128 %x, i1 true)
+ %tobool = icmp ne i128 %x, 0
+ %cond = select i1 %tobool, i128 %1, i128 128
+ ret i128 %cond
+}
declare i16 @llvm.ctlz.i16(i16, i1)
declare i32 @llvm.ctlz.i32(i32, i1)
declare i64 @llvm.ctlz.i64(i64, i1)
+declare i128 @llvm.ctlz.i128(i128, i1)
declare i16 @llvm.cttz.i16(i16, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare i64 @llvm.cttz.i64(i64, i1)
+declare i128 @llvm.cttz.i128(i128, i1)
diff --git a/test/Transforms/InstCombine/select-with-bitwise-ops.ll b/test/Transforms/InstCombine/select-with-bitwise-ops.ll
index caec9412a7fd6..437f09be2e7c9 100644
--- a/test/Transforms/InstCombine/select-with-bitwise-ops.ll
+++ b/test/Transforms/InstCombine/select-with-bitwise-ops.ll
@@ -104,10 +104,10 @@ define i32 @select_icmp_ne_0_and_32_or_4096(i32 %x, i32 %y) {
define i8 @select_icmp_ne_0_and_1073741824_or_8(i32 %x, i8 %y) {
; CHECK-LABEL: @select_icmp_ne_0_and_1073741824_or_8(
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1073741824
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], 8
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i8 [[Y]], i8 [[OR]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 1073741824
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: [[OR:%.*]] = or i8 %y, 8
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i8 [[OR]], i8 %y
; CHECK-NEXT: ret i8 [[SELECT]]
;
%and = and i32 %x, 1073741824
@@ -119,10 +119,10 @@ define i8 @select_icmp_ne_0_and_1073741824_or_8(i32 %x, i8 %y) {
define i32 @select_icmp_ne_0_and_8_or_1073741824(i8 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_ne_0_and_8_or_1073741824(
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[AND]], 0
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 1073741824
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[Y]], i32 [[OR]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 %x, 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT: [[OR:%.*]] = or i32 %y, 1073741824
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[OR]], i32 %y
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i8 %x, 8
@@ -271,8 +271,8 @@ define i32 @test65(i64 %x) {
define i32 @test66(i64 %x) {
; CHECK-LABEL: @test66(
; CHECK-NEXT: [[TMP1:%.*]] = and i64 %x, 4294967296
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 40, i32 42
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 42, i32 40
; CHECK-NEXT: ret i32 [[TMP3]]
;
%1 = and i64 %x, 4294967296
@@ -376,10 +376,10 @@ define i32 @no_shift_xor_multiuse_or(i32 %x, i32 %y) {
define i32 @shift_xor_multiuse_or(i32 %x, i32 %y) {
; CHECK-LABEL: @shift_xor_multiuse_or(
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 2048
-; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[Y]], i32 [[OR]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 4096
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: [[OR:%.*]] = or i32 %y, 2048
+; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[OR]], i32 %y
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -430,11 +430,11 @@ define i32 @no_shift_no_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
define i32 @no_shift_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-LABEL: @no_shift_xor_multiuse_cmp(
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 4096
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], %y
+; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 %w, i32 %z
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[TMP2]], [[SELECT2]]
; CHECK-NEXT: ret i32 [[RES]]
;
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index 0f94235982b99..c8f2a50b72eda 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -1220,12 +1220,13 @@ entry:
}
define i32 @test_select_select0(i32 %a, i32 %r0, i32 %r1, i32 %v1, i32 %v2) {
- ; CHECK-LABEL: @test_select_select0(
- ; CHECK: %[[C0:.*]] = icmp sge i32 %a, %v1
- ; CHECK-NEXT: %[[C1:.*]] = icmp slt i32 %a, %v2
- ; CHECK-NEXT: %[[C:.*]] = and i1 %[[C1]], %[[C0]]
- ; CHECK-NEXT: %[[SEL:.*]] = select i1 %[[C]], i32 %r0, i32 %r1
- ; CHECK-NEXT: ret i32 %[[SEL]]
+; CHECK-LABEL: @test_select_select0(
+; CHECK-NEXT: [[C0:%.*]] = icmp slt i32 %a, %v1
+; CHECK-NEXT: [[S0:%.*]] = select i1 [[C0]], i32 %r1, i32 %r0
+; CHECK-NEXT: [[C1:%.*]] = icmp slt i32 %a, %v2
+; CHECK-NEXT: [[S1:%.*]] = select i1 [[C1]], i32 [[S0]], i32 %r1
+; CHECK-NEXT: ret i32 [[S1]]
+;
%c0 = icmp sge i32 %a, %v1
%s0 = select i1 %c0, i32 %r0, i32 %r1
%c1 = icmp slt i32 %a, %v2
@@ -1234,12 +1235,13 @@ define i32 @test_select_select0(i32 %a, i32 %r0, i32 %r1, i32 %v1, i32 %v2) {
}
define i32 @test_select_select1(i32 %a, i32 %r0, i32 %r1, i32 %v1, i32 %v2) {
- ; CHECK-LABEL: @test_select_select1(
- ; CHECK: %[[C0:.*]] = icmp sge i32 %a, %v1
- ; CHECK-NEXT: %[[C1:.*]] = icmp slt i32 %a, %v2
- ; CHECK-NEXT: %[[C:.*]] = or i1 %[[C1]], %[[C0]]
- ; CHECK-NEXT: %[[SEL:.*]] = select i1 %[[C]], i32 %r0, i32 %r1
- ; CHECK-NEXT: ret i32 %[[SEL]]
+; CHECK-LABEL: @test_select_select1(
+; CHECK-NEXT: [[C0:%.*]] = icmp slt i32 %a, %v1
+; CHECK-NEXT: [[S0:%.*]] = select i1 [[C0]], i32 %r1, i32 %r0
+; CHECK-NEXT: [[C1:%.*]] = icmp slt i32 %a, %v2
+; CHECK-NEXT: [[S1:%.*]] = select i1 [[C1]], i32 %r0, i32 [[S0]]
+; CHECK-NEXT: ret i32 [[S1]]
+;
%c0 = icmp sge i32 %a, %v1
%s0 = select i1 %c0, i32 %r0, i32 %r1
%c1 = icmp slt i32 %a, %v2
diff --git a/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll b/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll
index a038fd1a411b8..c8efb41ce7376 100644
--- a/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll
+++ b/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll
@@ -774,3 +774,28 @@ define void @load_factor2_fp128(<4 x fp128>* %ptr) {
%v1 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> undef, <2 x i32> <i32 1, i32 3>
ret void
}
+
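+; With NEON, the pointer elements are loaded as i64 through two ld3 intrinsics
+; and converted back with inttoptr; the stride-3 shuffle masks <0,3,6,9> and
+; <1,4,7,10> select the first two fields of each group.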
+define <4 x i1> @load_large_vector(<12 x i64 *>* %p) {
+; NEON-LABEL: @load_large_vector(
+; NEON: [[LDN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0v2i64(<2 x i64>*
+; NEON-NEXT: [[TMP1:%.*]] = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } [[LDN]], 1
+; NEON-NEXT: [[TMP2:%.*]] = inttoptr <2 x i64> [[TMP1]] to <2 x i64*>
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } [[LDN]], 0
+; NEON-NEXT: [[TMP4:%.*]] = inttoptr <2 x i64> [[TMP3]] to <2 x i64*>
+; NEON: [[LDN1:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0v2i64(<2 x i64>*
+; NEON-NEXT: [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } [[LDN1]], 1
+; NEON-NEXT: [[TMP6:%.*]] = inttoptr <2 x i64> [[TMP5]] to <2 x i64*>
+; NEON-NEXT: [[TMP7:%.*]] = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } [[LDN1]], 0
+; NEON-NEXT: [[TMP8:%.*]] = inttoptr <2 x i64> [[TMP7]] to <2 x i64*>
+; NEON-NEXT: shufflevector <2 x i64*> [[TMP2]], <2 x i64*> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: shufflevector <2 x i64*> [[TMP4]], <2 x i64*> [[TMP8]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NO_NEON-LABEL: @load_large_vector(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret
+;
+ %l = load <12 x i64 *>, <12 x i64 *>* %p
+ %s1 = shufflevector <12 x i64 *> %l, <12 x i64 *> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+ %s2 = shufflevector <12 x i64 *> %l, <12 x i64 *> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+ %ret = icmp ne <4 x i64 *> %s1, %s2
+ ret <4 x i1> %ret
+}
diff --git a/test/Transforms/JumpThreading/range-compare.ll b/test/Transforms/JumpThreading/range-compare.ll
new file mode 100644
index 0000000000000..54e94d06649bb
--- /dev/null
+++ b/test/Transforms/JumpThreading/range-compare.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -jump-threading -S | FileCheck %s
+
+
+declare void @bar(...)
+declare void @baz(...)
+
+; Make sure we thread the end of the bar block to the end of the function.
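+; If %x > 9 then %x - 3 u>= 7, so the range check (%x - 3 u< 5) must be
+; false and the path through bar() can branch straight to if.end4.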
+define void @test1(i32 %x) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], 9
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_END_THREAD:%.*]], label [[IF_END:%.*]]
+; CHECK: if.end.thread:
+; CHECK-NEXT: call void (...) @bar()
+; CHECK-NEXT: br label [[IF_END4:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], -3
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X_OFF]], 5
+; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN3:%.*]], label [[IF_END4]]
+; CHECK: if.then3:
+; CHECK-NEXT: call void (...) @baz()
+; CHECK-NEXT: br label [[IF_END4]]
+; CHECK: if.end4:
+; CHECK-NEXT: ret void
+;
+entry:
+ %cmp = icmp sgt i32 %x, 9
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void (...) @bar()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %x.off = add i32 %x, -3
+ %0 = icmp ult i32 %x.off, 5
+ br i1 %0, label %if.then3, label %if.end4
+
+if.then3: ; preds = %if.end
+ call void (...) @baz()
+ br label %if.end4
+
+if.end4: ; preds = %if.then3, %if.end
+ ret void
+}
+
+; Make sure we thread the false side of the first if to the end of the function.
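+; If %x s>= 9 then %x - 3 u>= 6, so the range check is false and the false
+; side of the first branch can go directly to if.end4.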
+define void @test2(i32 %x) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], 9
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_END:%.*]], label [[IF_END4:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: call void (...) @bar()
+; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], -3
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X_OFF]], 5
+; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN3:%.*]], label [[IF_END4]]
+; CHECK: if.then3:
+; CHECK-NEXT: call void (...) @baz()
+; CHECK-NEXT: br label [[IF_END4]]
+; CHECK: if.end4:
+; CHECK-NEXT: ret void
+;
+entry:
+ %cmp = icmp slt i32 %x, 9
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void (...) @bar()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %x.off = add i32 %x, -3
+ %0 = icmp ult i32 %x.off, 5
+ br i1 %0, label %if.then3, label %if.end4
+
+if.then3: ; preds = %if.end
+ call void (...) @baz()
+ br label %if.end4
+
+if.end4: ; preds = %if.then3, %if.end
+ ret void
+}
+
+; Negative test to make sure we don't thread when the ranges overlap.
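+; %x > 6 still allows %x - 3 u< 5 (e.g. %x == 7), so the second condition is
+; not decided by the first and no threading is possible.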
+define void @test3(i32 %x) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], 6
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: call void (...) @bar()
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X]], -3
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X_OFF]], 5
+; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN3:%.*]], label [[IF_END4:%.*]]
+; CHECK: if.then3:
+; CHECK-NEXT: call void (...) @baz()
+; CHECK-NEXT: br label [[IF_END4]]
+; CHECK: if.end4:
+; CHECK-NEXT: ret void
+;
+entry:
+ %cmp = icmp sgt i32 %x, 6
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void (...) @bar()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %x.off = add i32 %x, -3
+ %0 = icmp ult i32 %x.off, 5
+ br i1 %0, label %if.then3, label %if.end4
+
+if.then3: ; preds = %if.end
+ call void (...) @baz()
+ br label %if.end4
+
+if.end4: ; preds = %if.then3, %if.end
+ ret void
+}
+
diff --git a/test/Transforms/LICM/dropped-tbaa.ll b/test/Transforms/LICM/dropped-tbaa.ll
new file mode 100644
index 0000000000000..7d37ca55c1880
--- /dev/null
+++ b/test/Transforms/LICM/dropped-tbaa.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -scoped-noalias -tbaa -licm -S | FileCheck %s
+
+; This test case is generated from the following C code with -fstrict-aliasing,
+; and after passing through -inline -mem2reg -loop-rotate -instcombine
+; void add(double *restrict data, int *restrict addend) {
+; *data += *addend;
+; }
+;
+; void foo(double *data, int *addend) {
+; for (int i = 0; i < 1000; ++i) {
+; *data += *addend;
+; add(data, addend);
+; }
+; }
+; We want to make sure the load of addend gets hoisted even though the second
+; load of it carries different noalias metadata.
+
+define void @foo(double* %data, i32* %addend) #0 {
+; CHECK: for.body.lr.ph:
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ADDEND:%.*]], align 4, !tbaa !1
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to double
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ADDEND]], align 4, !tbaa !1, !alias.scope !5, !noalias !8
+; CHECK-NEXT: [[CONV_I:%.*]] = sitofp i32 [[TMP2]] to double
+entry:
+ %i = alloca i32, align 4
+ %0 = bitcast i32* %i to i8*
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #2
+ store i32 0, i32* %i, align 4, !tbaa !1
+ br i1 true, label %for.body.lr.ph, label %for.cond.cleanup
+
+for.body.lr.ph: ; preds = %entry
+ br label %for.body
+
+for.cond.for.cond.cleanup_crit_edge: ; preds = %for.inc
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.for.cond.cleanup_crit_edge, %entry
+ %1 = bitcast i32* %i to i8*
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %1) #2
+ br label %for.end
+
+for.body: ; preds = %for.body.lr.ph, %for.inc
+ %2 = load i32, i32* %addend, align 4, !tbaa !1
+ %conv = sitofp i32 %2 to double
+ %3 = load i32, i32* %i, align 4, !tbaa !1
+ %idxprom = sext i32 %3 to i64
+ %arrayidx = getelementptr inbounds double, double* %data, i64 %idxprom
+ %4 = load double, double* %arrayidx, align 8, !tbaa !5
+ %add = fadd double %4, %conv
+ store double %add, double* %arrayidx, align 8, !tbaa !5
+ %idxprom1 = sext i32 %3 to i64
+ %arrayidx2 = getelementptr inbounds double, double* %data, i64 %idxprom1
+ %5 = load i32, i32* %addend, align 4, !tbaa !1, !alias.scope !7, !noalias !10
+ %conv.i = sitofp i32 %5 to double
+ %6 = load double, double* %arrayidx2, align 8, !tbaa !5, !alias.scope !10, !noalias !7
+ %add.i = fadd double %6, %conv.i
+ store double %add.i, double* %arrayidx2, align 8, !tbaa !5, !alias.scope !10, !noalias !7
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32, i32* %i, align 4, !tbaa !1
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4, !tbaa !1
+ %cmp = icmp slt i32 %inc, 1000
+ br i1 %cmp, label %for.body, label %for.cond.for.cond.cleanup_crit_edge
+
+for.end: ; preds = %for.cond.cleanup
+ ret void
+}
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+
+attributes #0 = { argmemonly nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 5.0.0 (llvm/trunk 299971)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !3, i64 0}
+!7 = !{!8}
+!8 = distinct !{!8, !9, !"add: %addend"}
+!9 = distinct !{!9, !"add"}
+!10 = !{!11}
+!11 = distinct !{!11, !9, !"add: %data"}
diff --git a/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll b/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll
new file mode 100644
index 0000000000000..b2930dc5f89e6
--- /dev/null
+++ b/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll
@@ -0,0 +1,169 @@
+; RUN: opt < %s -S -loop-unroll -mtriple aarch64 -mcpu=falkor | FileCheck %s
+; RUN: opt < %s -S -loop-unroll -mtriple aarch64 -mcpu=falkor -enable-falkor-hwpf-unroll-fix=0 | FileCheck %s --check-prefix=NOHWPF
+
+; Check that loop unroller doesn't exhaust HW prefetcher resources.
+
+; Partial unroll 2 times for this loop on falkor instead of 4.
+; NOHWPF-LABEL: @unroll1(
+; NOHWPF-LABEL: loop:
+; NOHWPF-NEXT: phi
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: icmp
+; NOHWPF-NEXT: br
+; NOHWPF-NEXT-LABEL: exit:
+;
+; CHECK-LABEL: @unroll1(
+; CHECK-LABEL: loop:
+; CHECK-NEXT: phi
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br
+; CHECK-NEXT-LABEL: exit:
+define void @unroll1(i32* %p, i32* %p2) {
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
+
+ %gep = getelementptr inbounds i32, i32* %p, i32 %iv
+ %load = load volatile i32, i32* %gep
+
+ %gep2 = getelementptr inbounds i32, i32* %p2, i32 %iv
+ %load2 = load volatile i32, i32* %gep2
+
+ %inc = add i32 %iv, 1
+ %exitcnd = icmp uge i32 %inc, 1024
+ br i1 %exitcnd, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Partial unroll 4 times for this loop on falkor instead of 8.
+; NOHWPF-LABEL: @unroll2(
+; NOHWPF-LABEL: loop2:
+; NOHWPF-NEXT: phi
+; NOHWPF-NEXT: phi
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: icmp
+; NOHWPF-NEXT: br
+; NOHWPF-NEXT-LABEL: exit2:
+;
+; CHECK-LABEL: @unroll2(
+; CHECK-LABEL: loop2:
+; CHECK-NEXT: phi
+; CHECK-NEXT: phi
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br
+; CHECK-NEXT-LABEL: exit2:
+
+define void @unroll2(i32* %p) {
+entry:
+ br label %loop1
+
+loop1:
+ %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
+ %outer.sum = phi i32 [ 0, %entry ], [ %sum, %loop1.latch ]
+ br label %loop2.header
+
+loop2.header:
+ br label %loop2
+
+loop2:
+ %iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
+ %sum = phi i32 [ %outer.sum, %loop2.header ], [ %sum.inc, %loop2 ]
+ %gep = getelementptr inbounds i32, i32* %p, i32 %iv2
+ %load = load i32, i32* %gep
+ %sum.inc = add i32 %sum, %load
+ %inc2 = add i32 %iv2, 1
+ %exitcnd2 = icmp uge i32 %inc2, 1024
+ br i1 %exitcnd2, label %exit2, label %loop2
+
+exit2:
+ br label %loop1.latch
+
+loop1.latch:
+ %inc1 = add i32 %iv1, 1
+ %exitcnd1 = icmp uge i32 %inc1, 1024
+  br i1 %exitcnd1, label %exit, label %loop1
+
+exit:
+ ret void
+}
+
diff --git a/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll b/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
new file mode 100644
index 0000000000000..1f31a133e34d9
--- /dev/null
+++ b/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll
@@ -0,0 +1,279 @@
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine -S| FileCheck %s
+; RUN: opt < %s -loop-unroll -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=true -unroll-runtime-multi-exit=true -verify-dom-info -verify-loop-info -instcombine
+
+; The second RUN line generates an epilog remainder block for all the test
+; cases below (it does not generate a remainder loop).
+
+; test with three exiting and three exit blocks.
+; none of the exit blocks have successors
+define void @test1(i64 %trip, i1 %cond) {
+; CHECK-LABEL: test1
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
+; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7
+; CHECK-NEXT: br i1 [[TMP1]], label %exit2.loopexit.unr-lcssa, label [[ENTRY_NEW:%.*]]
+; CHECK: entry.new:
+; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TRIP]], [[XTRAITER]]
+; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
+; CHECK-LABEL: loop_latch.epil:
+; CHECK-NEXT: %epil.iter.sub = add i64 %epil.iter, -1
+; CHECK-NEXT: %epil.iter.cmp = icmp eq i64 %epil.iter.sub, 0
+; CHECK-NEXT: br i1 %epil.iter.cmp, label %exit2.loopexit.epilog-lcssa, label %loop_header.epil
+; CHECK-LABEL: loop_latch.7:
+; CHECK-NEXT: %niter.nsub.7 = add i64 %niter, -8
+; CHECK-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.nsub.7, 0
+; CHECK-NEXT: br i1 %niter.ncmp.7, label %exit2.loopexit.unr-lcssa.loopexit, label %loop_header
+entry:
+ br label %loop_header
+
+loop_header:
+ %iv = phi i64 [ 0, %entry ], [ %iv_next, %loop_latch ]
+ br i1 %cond, label %loop_latch, label %loop_exiting_bb1
+
+loop_exiting_bb1:
+ br i1 false, label %loop_exiting_bb2, label %exit1
+
+loop_exiting_bb2:
+ br i1 false, label %loop_latch, label %exit3
+
+exit3:
+ ret void
+
+loop_latch:
+ %iv_next = add i64 %iv, 1
+ %cmp = icmp ne i64 %iv_next, %trip
+ br i1 %cmp, label %loop_header, label %exit2.loopexit
+
+exit1:
+ ret void
+
+exit2.loopexit:
+ ret void
+}
+
+
+; test with three exiting and two exit blocks.
+; The non-latch exit block has 2 unique predecessors.
+; Two values that are computed on every iteration, %sum.02 and %add, are
+; passed to the exit blocks. Both are incoming values for the exit phi from
+; every unrolled exiting block.
+define i32 @test2(i32* nocapture %a, i64 %n) {
+; CHECK-LABEL: test2
+; CHECK-LABEL: for.exit2.loopexit:
+; CHECK-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %for.body ], [ 42, %for.exiting_block.1 ], [ %add.1, %for.body.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %for.body.2 ], [ 42, %for.exiting_block.3 ],
+; CHECK-NEXT: br label %for.exit2
+; CHECK-LABEL: for.exit2.loopexit2:
+; CHECK-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ]
+; CHECK-NEXT: br label %for.exit2
+; CHECK-LABEL: for.exit2:
+; CHECK-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ]
+; CHECK-NEXT: ret i32 %retval
+; CHECK: %niter.nsub.7 = add i64 %niter, -8
+entry:
+ br label %header
+
+header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ br i1 false, label %for.exit2, label %for.exiting_block
+
+for.exiting_block:
+ %cmp = icmp eq i64 %n, 42
+ br i1 %cmp, label %for.exit2, label %for.body
+
+for.body:
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %sum.02
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %n
+ br i1 %exitcond, label %for.end, label %header
+
+for.end: ; preds = %for.body
+ %sum.0.lcssa = phi i32 [ %add, %for.body ]
+ ret i32 %sum.0.lcssa
+
+for.exit2:
+ %retval = phi i32 [ %sum.02, %header ], [ 42, %for.exiting_block ]
+ ret i32 %retval
+}
+
+; test with two exiting and three exit blocks.
+; the non-latch exiting block has a switch.
+define void @test3(i64 %trip, i64 %add) {
+; CHECK-LABEL: test3
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[TRIP:%.*]], -1
+; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TRIP]], 7
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7
+; CHECK-NEXT: br i1 [[TMP1]], label %exit2.loopexit.unr-lcssa, label [[ENTRY_NEW:%.*]]
+; CHECK: entry.new:
+; CHECK-NEXT: %unroll_iter = sub i64 [[TRIP]], [[XTRAITER]]
+; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
+; CHECK-LABEL: loop_header:
+; CHECK-NEXT: %sum = phi i64 [ 0, %entry.new ], [ %sum.next.7, %loop_latch.7 ]
+; CHECK-NEXT: %niter = phi i64 [ %unroll_iter, %entry.new ], [ %niter.nsub.7, %loop_latch.7 ]
+; CHECK-LABEL: loop_exiting_bb1.7:
+; CHECK-NEXT: switch i64 %sum.next.6, label %loop_latch.7
+; CHECK-LABEL: loop_latch.7:
+; CHECK-NEXT: %sum.next.7 = add i64 %sum.next.6, %add
+; CHECK-NEXT: %niter.nsub.7 = add i64 %niter, -8
+; CHECK-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.nsub.7, 0
+; CHECK-NEXT: br i1 %niter.ncmp.7, label %exit2.loopexit.unr-lcssa.loopexit, label %loop_header
+entry:
+ br label %loop_header
+
+loop_header:
+ %iv = phi i64 [ 0, %entry ], [ %iv_next, %loop_latch ]
+ %sum = phi i64 [ 0, %entry ], [ %sum.next, %loop_latch ]
+ br i1 undef, label %loop_latch, label %loop_exiting_bb1
+
+loop_exiting_bb1:
+ switch i64 %sum, label %loop_latch [
+ i64 24, label %exit1
+ i64 42, label %exit3
+ ]
+
+exit3:
+ ret void
+
+loop_latch:
+ %iv_next = add nuw nsw i64 %iv, 1
+ %sum.next = add i64 %sum, %add
+ %cmp = icmp ne i64 %iv_next, %trip
+ br i1 %cmp, label %loop_header, label %exit2.loopexit
+
+exit1:
+ ret void
+
+exit2.loopexit:
+ ret void
+}
+
+; FIXME: Support multiple exiting blocks to the same latch exit block.
+define i32 @test4(i32* nocapture %a, i64 %n, i1 %cond) {
+; CHECK-LABEL: test4
+; CHECK-NOT: .unr
+; CHECK-NOT: .epil
+entry:
+ br label %header
+
+header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ br i1 %cond, label %for.end, label %for.exiting_block
+
+for.exiting_block:
+ %cmp = icmp eq i64 %n, 42
+ br i1 %cmp, label %for.exit2, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %sum.02
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %n
+ br i1 %exitcond, label %for.end, label %header
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i32 [ 0, %header ], [ %add, %for.body ]
+ ret i32 %sum.0.lcssa
+
+for.exit2:
+ ret i32 42
+}
+
+; two exiting and two exit blocks.
+; the non-latch exiting block has duplicate edges to the non-latch exit block.
+define i64 @test5(i64 %trip, i64 %add, i1 %cond) {
+; CHECK-LABEL: test5
+; CHECK-LABEL: exit1.loopexit:
+; CHECK-NEXT: %result.ph = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.2, %loop_exiting.2 ],
+; CHECK-NEXT: br label %exit1
+; CHECK-LABEL: exit1.loopexit2:
+; CHECK-NEXT: %ivy.epil = add i64 %iv.epil, %add
+; CHECK-NEXT: br label %exit1
+; CHECK-LABEL: exit1:
+; CHECK-NEXT: %result = phi i64 [ %result.ph, %exit1.loopexit ], [ %ivy.epil, %exit1.loopexit2 ]
+; CHECK-NEXT: ret i64 %result
+; CHECK-LABEL: loop_latch.7:
+; CHECK: %niter.nsub.7 = add i64 %niter, -8
+entry:
+ br label %loop_header
+
+loop_header:
+ %iv = phi i64 [ 0, %entry ], [ %iv_next, %loop_latch ]
+ %sum = phi i64 [ 0, %entry ], [ %sum.next, %loop_latch ]
+ br i1 %cond, label %loop_latch, label %loop_exiting
+
+loop_exiting:
+ %ivy = add i64 %iv, %add
+ switch i64 %sum, label %loop_latch [
+ i64 24, label %exit1
+ i64 42, label %exit1
+ ]
+
+loop_latch:
+ %iv_next = add nuw nsw i64 %iv, 1
+ %sum.next = add i64 %sum, %add
+ %cmp = icmp ne i64 %iv_next, %trip
+ br i1 %cmp, label %loop_header, label %latchexit
+
+exit1:
+ %result = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ]
+ ret i64 %result
+
+latchexit:
+ ret i64 %sum.next
+}
+
+; test when exit blocks have successors.
+define i32 @test6(i32* nocapture %a, i64 %n, i1 %cond, i32 %x) {
+; CHECK-LABEL: test6
+; CHECK-LABEL: for.exit2.loopexit:
+; CHECK-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %latch ], [ 42, %for.exiting_block.1 ], [ %add.1, %latch.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %latch.2 ],
+; CHECK-NEXT: br label %for.exit2
+; CHECK-LABEL: for.exit2.loopexit2:
+; CHECK-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ]
+; CHECK-NEXT: br label %for.exit2
+; CHECK-LABEL: for.exit2:
+; CHECK-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ]
+; CHECK-NEXT: br i1 %cond, label %exit_true, label %exit_false
+; CHECK-LABEL: latch.7:
+; CHECK: %niter.nsub.7 = add i64 %niter, -8
+entry:
+ br label %header
+
+header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %latch ], [ 0, %entry ]
+ %sum.02 = phi i32 [ %add, %latch ], [ 0, %entry ]
+ br i1 false, label %for.exit2, label %for.exiting_block
+
+for.exiting_block:
+ %cmp = icmp eq i64 %n, 42
+ br i1 %cmp, label %for.exit2, label %latch
+
+latch:
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %load = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %load, %sum.02
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %n
+ br i1 %exitcond, label %latch_exit, label %header
+
+latch_exit:
+ %sum.0.lcssa = phi i32 [ %add, %latch ]
+ ret i32 %sum.0.lcssa
+
+for.exit2:
+ %retval = phi i32 [ %sum.02, %header ], [ 42, %for.exiting_block ]
+ %addx = add i32 %retval, %x
+ br i1 %cond, label %exit_true, label %exit_false
+
+exit_true:
+ ret i32 %retval
+
+exit_false:
+ ret i32 %addx
+}
diff --git a/test/Transforms/LoopUnroll/unroll-maxcount.ll b/test/Transforms/LoopUnroll/unroll-maxcount.ll
new file mode 100644
index 0000000000000..4cbd757aec229
--- /dev/null
+++ b/test/Transforms/LoopUnroll/unroll-maxcount.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -S -loop-unroll -unroll-allow-partial -unroll-max-count=1 | FileCheck %s
+; Checks that unroll MaxCount is honored.
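+; With -unroll-max-count=1 the loop may not be unrolled at all, so the body
+; (load/add/store) must appear exactly once below.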
+;
+; CHECK-LABEL: @foo(
+; CHECK-LABEL: for.body:
+; CHECK-NEXT: phi
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: store
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br
+define void @foo(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
index 8d139ac7e5af0..46fd022af6653 100644
--- a/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -3,10 +3,11 @@
; CHECK: LV: Loop hints: force=enabled
; CHECK: LV: Loop hints: force=?
+; CHECK: LV: Loop hints: force=?
; No more loops in the module
; CHECK-NOT: LV: Loop hints: force=
-; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
-; CHECK: 1 loop-vectorize - Number of loops vectorized
+; CHECK: 3 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 2 loop-vectorize - Number of loops vectorized
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -71,3 +72,29 @@ for.end:
!3 = !{!3}
+;
+; This loop will be vectorized as the trip count is below the threshold but no
+; scalar iterations are needed.
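+; (The trip count of 16 is a multiple of the vectorization factor, so the
+; vector loop covers every iteration.)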
+;
+define void @vectorized2(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
+ %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
+ %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %add = fadd fast float %0, %1
+ store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 16
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !4
+
+for.end:
+ ret void
+}
+
+!4 = !{!4}
+
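A C equivalent of @vectorized2, as a hedged sketch (the trip count of 16 and the A[i] += B[i] body are read off the IR; restrict mirrors the noalias arguments):

void vectorized2(float *restrict A, const float *restrict B) {
  /* 16 iterations: below the small-trip-count threshold, but a
     multiple of the vector width, so no scalar remainder is needed. */
  for (long i = 0; i < 16; i++)
    A[i] += B[i];
}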
diff --git a/test/Transforms/LoopVectorize/first-order-recurrence.ll b/test/Transforms/LoopVectorize/first-order-recurrence.ll
index 3d1c78038e328..0ff94c1450acf 100644
--- a/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -2,6 +2,8 @@
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -dce -instcombine -S | FileCheck %s --check-prefix=UNROLL
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-IC
; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-VF
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s --check-prefix=SINK-AFTER
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s --check-prefix=NO-SINK-AFTER
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
@@ -295,14 +297,14 @@ for.cond.cleanup3:
; UNROLL-NO-IC-NEXT: [[TMP28:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP29:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP30:%.*]] = load i32, i32* {{.*}}
-; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = load i32, i32* {{.*}}
-; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = load i32, i32* {{.*}}
-; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = load i32, i32* {{.*}}
-; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> undef, i32 [[TMP27]], i32 0
; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = insertelement <4 x i32> [[TMP35]], i32 [[TMP28]], i32 1
; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP36]], i32 [[TMP29]], i32 2
; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = insertelement <4 x i32> [[TMP37]], i32 [[TMP30]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP39:%.*]] = insertelement <4 x i32> undef, i32 [[TMP31]], i32 0
; UNROLL-NO-IC-NEXT: [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP32]], i32 1
; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> [[TMP40]], i32 [[TMP33]], i32 2
@@ -396,3 +398,132 @@ for.body:
for.end:
ret i32 %val.phi
}
+
+; We vectorize this first-order recurrence, with a set of insertelements for
+; each unrolled part. Make sure these insertelements are generated in order,
+; because the shuffle of the first-order recurrence will be added after the
+; insertelement of the last unrolled part (UF - 1), assuming the latter
+; appears after the insertelements of all other parts.
+;
+; int PR33613(double *b, double j, int d) {
+; int a = 0;
+; for(int i = 0; i < 10240; i++, b+=25) {
+; double f = b[d]; // Scalarize to form insertelements
+; if (j * f)
+; a++;
+; j = f;
+; }
+; return a;
+; }
+;
+; UNROLL-NO-IC-LABEL: @PR33613(
+; UNROLL-NO-IC: vector.body:
+; UNROLL-NO-IC: [[VECTOR_RECUR:%.*]] = phi <4 x double>
+; UNROLL-NO-IC: shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> {{.*}}, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: shufflevector <4 x double> {{.*}}, <4 x double> {{.*}}, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NOT: insertelement <4 x double>
+; UNROLL-NO-IC: middle.block:
+;
+define i32 @PR33613(double* %b, double %j, i32 %d) {
+entry:
+ %idxprom = sext i32 %d to i64
+ br label %for.body
+
+for.cond.cleanup:
+ %a.1.lcssa = phi i32 [ %a.1, %for.body ]
+ ret i32 %a.1.lcssa
+
+for.body:
+ %b.addr.012 = phi double* [ %b, %entry ], [ %add.ptr, %for.body ]
+ %i.011 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]
+ %a.010 = phi i32 [ 0, %entry ], [ %a.1, %for.body ]
+ %j.addr.09 = phi double [ %j, %entry ], [ %0, %for.body ]
+ %arrayidx = getelementptr inbounds double, double* %b.addr.012, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
+ %mul = fmul double %j.addr.09, %0
+ %tobool = fcmp une double %mul, 0.000000e+00
+ %inc = zext i1 %tobool to i32
+ %a.1 = add nsw i32 %a.010, %inc
+ %inc1 = add nuw nsw i32 %i.011, 1
+ %add.ptr = getelementptr inbounds double, double* %b.addr.012, i64 25
+ %exitcond = icmp eq i32 %inc1, 10240
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; void sink_after(short *a, int n, int *b) {
+; for(int i = 0; i < n; i++)
+; b[i] = (a[i] * a[i + 1]);
+; }
+;
+; SINK-AFTER-LABEL: sink_after
+; Check that the sext sank after the load in the vector loop.
+; SINK-AFTER: vector.body
+; SINK-AFTER: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ %wide.load, %vector.body ]
+; SINK-AFTER: %wide.load = load <4 x i16>
+; SINK-AFTER: %[[VSHUF:.+]] = shufflevector <4 x i16> %vector.recur, <4 x i16> %wide.load, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; SINK-AFTER: %[[VCONV:.+]] = sext <4 x i16> %[[VSHUF]] to <4 x i32>
+; SINK-AFTER: %[[VCONV3:.+]] = sext <4 x i16> %wide.load to <4 x i32>
+; SINK-AFTER: mul nsw <4 x i32> %[[VCONV3]], %[[VCONV]]
+; Check also that the sext sank after the load in the scalar loop.
+; SINK-AFTER: for.body
+; SINK-AFTER: %scalar.recur = phi i16 [ %scalar.recur.init, %scalar.ph ], [ %[[LOAD:.+]], %for.body ]
+; SINK-AFTER: %[[LOAD]] = load i16, i16* %arrayidx2
+; SINK-AFTER: %[[CONV:.+]] = sext i16 %scalar.recur to i32
+; SINK-AFTER: %[[CONV3:.+]] = sext i16 %[[LOAD]] to i32
+; SINK-AFTER: %mul = mul nsw i32 %[[CONV3]], %[[CONV]]
+;
+define void @sink_after(i16* %a, i32* %b, i64 %n) {
+entry:
+ %.pre = load i16, i16* %a
+ br label %for.body
+
+for.body:
+ %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %conv = sext i16 %0 to i32
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds i16, i16* %a, i64 %indvars.iv.next
+ %1 = load i16, i16* %arrayidx2
+ %conv3 = sext i16 %1 to i32
+ %mul = mul nsw i32 %conv3, %conv
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+ store i32 %mul, i32* %arrayidx5
+ %exitcond = icmp eq i64 %indvars.iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; void no_sink_after(short *a, int n, int *b) {
+; for(int i = 0; i < n; i++)
+; b[i] = ((a[i] + 2) * a[i + 1]);
+; }
+;
+; NO-SINK-AFTER-LABEL: no_sink_after
+; NO-SINK-AFTER-NOT: vector.ph:
+; NO-SINK-AFTER: }
+;
+define void @no_sink_after(i16* %a, i32* %b, i64 %n) {
+entry:
+ %.pre = load i16, i16* %a
+ br label %for.body
+
+for.body:
+ %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %conv = sext i16 %0 to i32
+ %add = add nsw i32 %conv, 2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds i16, i16* %a, i64 %indvars.iv.next
+ %1 = load i16, i16* %arrayidx2
+ %conv3 = sext i16 %1 to i32
+ %mul = mul nsw i32 %add, %conv3
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+ store i32 %mul, i32* %arrayidx5
+ %exitcond = icmp eq i64 %indvars.iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/if-conversion.ll b/test/Transforms/LoopVectorize/if-conversion.ll
index d3a16e2075d1a..ad50e0b00fc6e 100644
--- a/test/Transforms/LoopVectorize/if-conversion.ll
+++ b/test/Transforms/LoopVectorize/if-conversion.ll
@@ -18,7 +18,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
;CHECK-LABEL: @function0(
;CHECK: load <4 x i32>
-;CHECK: icmp sle <4 x i32>
+;CHECK: icmp sgt <4 x i32>
;CHECK: mul <4 x i32>
;CHECK: add <4 x i32>
;CHECK: select <4 x i1>
diff --git a/test/Transforms/LoopVectorize/minmax_reduction.ll b/test/Transforms/LoopVectorize/minmax_reduction.ll
index 19a401213fd5b..fd5ad7c38b099 100644
--- a/test/Transforms/LoopVectorize/minmax_reduction.ll
+++ b/test/Transforms/LoopVectorize/minmax_reduction.ll
@@ -244,7 +244,7 @@ for.end:
; SGE -> SLT
; Turn this into a min reduction (select inputs are reversed).
; CHECK-LABEL: @sge_min_red(
-; CHECK: icmp sge <2 x i32>
+; CHECK: icmp slt <2 x i32>
; CHECK: select <2 x i1>
; CHECK: middle.block
; CHECK: icmp slt <2 x i32>
@@ -273,7 +273,7 @@ for.end:
; SLE -> SGT
; Turn this into a max reduction (select inputs are reversed).
; CHECK-LABEL: @sle_min_red(
-; CHECK: icmp sle <2 x i32>
+; CHECK: icmp sgt <2 x i32>
; CHECK: select <2 x i1>
; CHECK: middle.block
; CHECK: icmp sgt <2 x i32>
@@ -302,7 +302,7 @@ for.end:
; UGE -> ULT
; Turn this into a min reduction (select inputs are reversed).
; CHECK-LABEL: @uge_min_red(
-; CHECK: icmp uge <2 x i32>
+; CHECK: icmp ult <2 x i32>
; CHECK: select <2 x i1>
; CHECK: middle.block
; CHECK: icmp ult <2 x i32>
@@ -331,7 +331,7 @@ for.end:
; ULE -> UGT
; Turn this into a max reduction (select inputs are reversed).
; CHECK-LABEL: @ule_min_red(
-; CHECK: icmp ule <2 x i32>
+; CHECK: icmp ugt <2 x i32>
; CHECK: select <2 x i1>
; CHECK: middle.block
; CHECK: icmp ugt <2 x i32>
diff --git a/test/Transforms/LoopVectorize/small-loop.ll b/test/Transforms/LoopVectorize/small-loop.ll
index 9a5dc4aa1b746..378283b464b99 100644
--- a/test/Transforms/LoopVectorize/small-loop.ll
+++ b/test/Transforms/LoopVectorize/small-loop.ll
@@ -7,7 +7,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
@c = common global [2048 x i32] zeroinitializer, align 16
;CHECK-LABEL: @example1(
-;CHECK-NOT: load <4 x i32>
+;CHECK: load <4 x i32>
;CHECK: ret void
define void @example1() nounwind uwtable ssp {
br label %1
@@ -23,8 +23,8 @@ define void @example1() nounwind uwtable ssp {
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, 8 ; <----- A really small trip count.
- br i1 %exitcond, label %8, label %1
+ %exitcond = icmp eq i32 %lftr.wideiv, 8 ; <----- A really small trip count
+ br i1 %exitcond, label %8, label %1 ; w/o scalar iteration overhead.
; <label>:8 ; preds = %1
ret void
diff --git a/test/Transforms/LowerTypeTests/export-icall.ll b/test/Transforms/LowerTypeTests/export-icall.ll
index f53b63af496ca..ad36048993067 100644
--- a/test/Transforms/LowerTypeTests/export-icall.ll
+++ b/test/Transforms/LowerTypeTests/export-icall.ll
@@ -60,6 +60,11 @@ declare !type !8 void @f(i32 %x)
; SUMMARY-NEXT: SizeM1BitWidth: 0
; SUMMARY-NEXT: WPDRes:
-; SUMMARY: CfiFunctionDefs: [ f, g, h ]
-; SUMMARY-NEXT: CfiFunctionDecls: [ external, external_weak ]
+; SUMMARY: CfiFunctionDefs:
+; SUMMARY-NEXT: - f
+; SUMMARY-NEXT: - g
+; SUMMARY-NEXT: - h
+; SUMMARY-NEXT: CfiFunctionDecls:
+; SUMMARY-NEXT: - external
+; SUMMARY-NEXT: - external_weak
; SUMMARY-NEXT: ...
diff --git a/test/Transforms/Reassociate/erase_inst_made_change.ll b/test/Transforms/Reassociate/erase_inst_made_change.ll
new file mode 100644
index 0000000000000..febb9447e2b43
--- /dev/null
+++ b/test/Transforms/Reassociate/erase_inst_made_change.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -inline -reassociate -S | FileCheck %s
+
+; This test case exposed a bug in reassociate where EraseInst's
+; removal of a dead call wasn't recognized as changing the IR.
+; So when runOnFunction propagated its "made changes" status
+; upwards to the CallGraphSCCPass, it signaled that no changes
+; had been made, and CallGraphSCCPass assumed that the old
+; CallGraph, as known by that pass manager, was still up-to-date.
+;
+; This was detected as an assertion failure when trying to remove
+; the no-longer-used function 'bar' (due to an incorrect reference
+; count in the CallGraph).
+
+define void @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret void
+entry:
+ call void @bar()
+ ret void
+}
+
+define internal void @bar() noinline nounwind readnone {
+; CHECK-NOT: bar
+entry:
+ ret void
+}
+
+
diff --git a/test/Transforms/SLPVectorizer/X86/limit.ll b/test/Transforms/SLPVectorizer/X86/limit.ll
new file mode 100644
index 0000000000000..41db490a754f6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/limit.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s --instcombine -slp-vectorizer -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@b = common global [4 x i32] zeroinitializer, align 16
+@c = common global [4 x i32] zeroinitializer, align 16
+@d = common global [4 x i32] zeroinitializer, align 16
+@e = common global [4 x i32] zeroinitializer, align 16
+@a = common global [4 x i32] zeroinitializer, align 16
+@fb = common global [4 x float] zeroinitializer, align 16
+@fc = common global [4 x float] zeroinitializer, align 16
+@fa = common global [4 x float] zeroinitializer, align 16
+@fd = common global [4 x float] zeroinitializer, align 16
+
+define void @addsub() {
+; CHECK-LABEL: @addsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @c to <4 x i32>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @d to <4 x i32>*), align 16
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @e to <4 x i32>*), align 16
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 16
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
+ %add = add nsw i32 %0, %1
+ br label %bb1
+bb1: ; preds = %entry
+ %2 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 0), align 4
+ %add1 = add nsw i32 %2, %3
+ %add2 = add nsw i32 %add, %add1
+ store i32 %add2, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 0), align 4
+ %4 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 1), align 4
+ %add3 = add nsw i32 %4, %5
+ %6 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 1), align 4
+ %add4 = add nsw i32 %6, %7
+ %sub = sub nsw i32 %add3, %add4
+ store i32 %sub, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 1), align 4
+ %8 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 2), align 4
+ %add5 = add nsw i32 %8, %9
+ %10 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 2), align 4
+ %add6 = add nsw i32 %10, %11
+ %add7 = add nsw i32 %add5, %add6
+ store i32 %add7, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 2), align 4
+ %12 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 3), align 4
+ %add8 = add nsw i32 %12, %13
+ %14 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 3), align 4
+ %add9 = add nsw i32 %14, %15
+ %sub10 = sub nsw i32 %add8, %add9
+ store i32 %sub10, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 3), align 4
+ ret void
+}
+
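The scalar pattern in @addsub, sketched in C (the globals mirror the IR; the alternating add/sub across lanes is what becomes the blending shufflevector in the CHECK lines):

int a[4], b[4], c[4], d[4], e[4];

void addsub(void) {
  /* Lanes 0 and 2 add, lanes 1 and 3 subtract; SLP emits one vector
     add, one vector sub, and a shuffle selecting <0, 5, 2, 7>. */
  a[0] = (b[0] + c[0]) + (d[0] + e[0]);
  a[1] = (b[1] + c[1]) - (d[1] + e[1]);
  a[2] = (b[2] + c[2]) + (d[2] + e[2]);
  a[3] = (b[3] + c[3]) - (d[3] + e[3]);
}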
diff --git a/test/Transforms/SROA/alloca-address-space.ll b/test/Transforms/SROA/alloca-address-space.ll
index 6b3b3abbff5ff..9d9f78f07ca1c 100644
--- a/test/Transforms/SROA/alloca-address-space.ll
+++ b/test/Transforms/SROA/alloca-address-space.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -sroa -S | FileCheck %s
-target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64-A2"
+target datalayout = "e-p:64:64:64-p1:16:16:16-p2:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64-A2"
declare void @llvm.memcpy.p2i8.p2i8.i32(i8 addrspace(2)* nocapture, i8 addrspace(2)* nocapture readonly, i32, i32, i1)
declare void @llvm.memcpy.p1i8.p2i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(2)* nocapture readonly, i32, i32, i1)
@@ -82,3 +82,32 @@ define void @pr27557() {
store i32 addrspace(3)* @l, i32 addrspace(3)* addrspace(2)* %3, align 8
ret void
}
+
+; Test loads from and stores to a non-zero address space.
+define void @test_load_store_diff_addr_space([2 x float] addrspace(1)* %complex1, [2 x float] addrspace(1)* %complex2) {
+; CHECK-LABEL: @test_load_store_diff_addr_space
+; CHECK-NOT: alloca
+; CHECK: load i32, i32 addrspace(1)*
+; CHECK: load i32, i32 addrspace(1)*
+; CHECK: store i32 %{{.*}}, i32 addrspace(1)*
+; CHECK: store i32 %{{.*}}, i32 addrspace(1)*
+ %a0 = alloca [2 x i64], align 8, addrspace(2)
+ %a = getelementptr [2 x i64], [2 x i64] addrspace(2)* %a0, i32 0, i32 0
+ %a.cast = bitcast i64 addrspace(2)* %a to [2 x float] addrspace(2)*
+ %a.gep1 = getelementptr [2 x float], [2 x float] addrspace(2)* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float], [2 x float] addrspace(2)* %a.cast, i32 0, i32 1
+ %complex1.gep = getelementptr [2 x float], [2 x float] addrspace(1)* %complex1, i32 0, i32 0
+ %p1 = bitcast float addrspace(1)* %complex1.gep to i64 addrspace(1)*
+ %v1 = load i64, i64 addrspace(1)* %p1
+ store i64 %v1, i64 addrspace(2)* %a
+ %f1 = load float, float addrspace(2)* %a.gep1
+ %f2 = load float, float addrspace(2)* %a.gep2
+ %sum = fadd float %f1, %f2
+ store float %sum, float addrspace(2)* %a.gep1
+ store float %sum, float addrspace(2)* %a.gep2
+ %v2 = load i64, i64 addrspace(2)* %a
+ %complex2.gep = getelementptr [2 x float], [2 x float] addrspace(1)* %complex2, i32 0, i32 0
+ %p2 = bitcast float addrspace(1)* %complex2.gep to i64 addrspace(1)*
+ store i64 %v2, i64 addrspace(1)* %p2
+ ret void
+}
diff --git a/test/Transforms/SROA/preserve-nonnull.ll b/test/Transforms/SROA/preserve-nonnull.ll
index fc5ce6a445fa0..a29da6dc2c377 100644
--- a/test/Transforms/SROA/preserve-nonnull.ll
+++ b/test/Transforms/SROA/preserve-nonnull.ll
@@ -3,24 +3,90 @@
; Make sure that SROA doesn't lose nonnull metadata
; on loads from allocas that get optimized out.
-; CHECK-LABEL: define float* @yummy_nonnull
-; CHECK: [[RETURN:%(.*)]] = load float*, float** %arg, align 8
-; CHECK: [[ASSUME:%(.*)]] = icmp ne float* {{.*}}[[RETURN]], null
-; CHECK: call void @llvm.assume(i1 {{.*}}[[ASSUME]])
-; CHECK: ret float* {{.*}}[[RETURN]]
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
-define float* @yummy_nonnull(float** %arg) {
-entry-block:
- %buf = alloca float*
+; Check that we do basic propagation of nonnull when rewriting.
+define i8* @propagate_nonnull(i32* %v) {
+; CHECK-LABEL: define i8* @propagate_nonnull(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %[[A:.*]] = alloca i8*
+; CHECK-NEXT: %[[V_CAST:.*]] = bitcast i32* %v to i8*
+; CHECK-NEXT: store i8* %[[V_CAST]], i8** %[[A]]
+; CHECK-NEXT: %[[LOAD:.*]] = load volatile i8*, i8** %[[A]], !nonnull !0
+; CHECK-NEXT: ret i8* %[[LOAD]]
+entry:
+ %a = alloca [2 x i8*]
+ %a.gep0 = getelementptr [2 x i8*], [2 x i8*]* %a, i32 0, i32 0
+ %a.gep1 = getelementptr [2 x i8*], [2 x i8*]* %a, i32 0, i32 1
+ %a.gep0.cast = bitcast i8** %a.gep0 to i32**
+ %a.gep1.cast = bitcast i8** %a.gep1 to i32**
+ store i32* %v, i32** %a.gep1.cast
+ store i32* null, i32** %a.gep0.cast
+ %load = load volatile i8*, i8** %a.gep1, !nonnull !0
+ ret i8* %load
+}
- %_arg_i8 = bitcast float** %arg to i8*
- %_buf_i8 = bitcast float** %buf to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %_buf_i8, i8* %_arg_i8, i64 8, i32 8, i1 false)
+define float* @turn_nonnull_into_assume(float** %arg) {
+; CHECK-LABEL: define float* @turn_nonnull_into_assume(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %[[RETURN:.*]] = load float*, float** %arg, align 8
+; CHECK-NEXT: %[[ASSUME:.*]] = icmp ne float* %[[RETURN]], null
+; CHECK-NEXT: call void @llvm.assume(i1 %[[ASSUME]])
+; CHECK-NEXT: ret float* %[[RETURN]]
+entry:
+ %buf = alloca float*
+ %_arg_i8 = bitcast float** %arg to i8*
+ %_buf_i8 = bitcast float** %buf to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %_buf_i8, i8* %_arg_i8, i64 8, i32 8, i1 false)
+ %ret = load float*, float** %buf, align 8, !nonnull !0
+ ret float* %ret
+}
- %ret = load float*, float** %buf, align 8, !nonnull !0
- ret float* %ret
+; Make sure we properly handle the !nonnull attribute when we convert
+; a pointer load to an integer load.
+; FIXME: While this doesn't do anything actively harmful today, it really
+; should propagate the !nonnull metadata to range metadata. The irony is, it
+; *does* initially, but then we lose that !range metadata before we finish
+; SROA.
+define i8* @propagate_nonnull_to_int() {
+; CHECK-LABEL: define i8* @propagate_nonnull_to_int(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %[[A:.*]] = alloca i64
+; CHECK-NEXT: store i64 42, i64* %[[A]]
+; CHECK-NEXT: %[[LOAD:.*]] = load volatile i64, i64* %[[A]]
+; CHECK-NEXT: %[[CAST:.*]] = inttoptr i64 %[[LOAD]] to i8*
+; CHECK-NEXT: ret i8* %[[CAST]]
+entry:
+ %a = alloca [2 x i8*]
+ %a.gep0 = getelementptr [2 x i8*], [2 x i8*]* %a, i32 0, i32 0
+ %a.gep1 = getelementptr [2 x i8*], [2 x i8*]* %a, i32 0, i32 1
+ %a.gep0.cast = bitcast i8** %a.gep0 to i64*
+ %a.gep1.cast = bitcast i8** %a.gep1 to i64*
+ store i64 42, i64* %a.gep1.cast
+ store i64 0, i64* %a.gep0.cast
+ %load = load volatile i8*, i8** %a.gep1, !nonnull !0
+ ret i8* %load
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+; Make sure we properly handle the !nonnull attribute when we convert
+; a pointer load to an integer load and immediately promote it to an SSA
+; register. This can fail in interesting ways due to the rewrite iteration of
+; SROA, resulting in PR32902.
+define i8* @propagate_nonnull_to_int_and_promote() {
+; CHECK-LABEL: define i8* @propagate_nonnull_to_int_and_promote(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %[[PROMOTED_VALUE:.*]] = inttoptr i64 42 to i8*
+; CHECK-NEXT: ret i8* %[[PROMOTED_VALUE]]
+entry:
+ %a = alloca [2 x i8*], align 8
+ %a.gep0 = getelementptr [2 x i8*], [2 x i8*]* %a, i32 0, i32 0
+ %a.gep1 = getelementptr [2 x i8*], [2 x i8*]* %a, i32 0, i32 1
+ %a.gep0.cast = bitcast i8** %a.gep0 to i64*
+ %a.gep1.cast = bitcast i8** %a.gep1 to i64*
+ store i64 42, i64* %a.gep1.cast
+ store i64 0, i64* %a.gep0.cast
+ %load = load i8*, i8** %a.gep1, align 8, !nonnull !0
+ ret i8* %load
+}
!0 = !{}
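turn_nonnull_into_assume above is roughly the following C; a sketch, since the !nonnull annotation on the final load has no C spelling and is assumed to be supplied by the frontend:

#include <string.h>

float *turn_nonnull_into_assume(float **arg) {
  float *buf;                      /* the alloca SROA removes        */
  memcpy(&buf, arg, sizeof(buf));  /* the llvm.memcpy in the IR      */
  return buf;                      /* load carries !nonnull metadata */
}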
diff --git a/test/Transforms/SimplifyCFG/Hexagon/lit.local.cfg b/test/Transforms/SimplifyCFG/Hexagon/lit.local.cfg
new file mode 100644
index 0000000000000..a1f0ecbf67927
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/Hexagon/lit.local.cfg
@@ -0,0 +1,5 @@
+config.suffixes = ['.ll']
+
+targets = set(config.root.targets_to_build.split())
+if 'Hexagon' not in targets:
+ config.unsupported = True
diff --git a/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll b/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll
new file mode 100644
index 0000000000000..4bc1251572aa4
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll
@@ -0,0 +1,62 @@
+; RUN: opt -S -O2 < %s | FileCheck %s -check-prefix=ENABLE
+; RUN: opt -S -hexagon-emit-lookup-tables=true -O2 < %s | FileCheck %s -check-prefix=ENABLE
+; RUN: opt -S -hexagon-emit-lookup-tables=false -O2 < %s | FileCheck %s -check-prefix=DISABLE
+
+
+; ENABLE: @{{.*}} = private unnamed_addr constant [6 x i32] [i32 9, i32 20, i32 14, i32 22, i32 12, i32 5]
+; DISABLE-NOT: @{{.*}} = private unnamed_addr constant [6 x i32] [i32 9, i32 20, i32 14, i32 22, i32 12, i32 5]
+; DISABLE: = phi i32 [ 19, %{{.*}} ], [ 5, %{{.*}} ], [ 12, %{{.*}} ], [ 22, %{{.*}} ], [ 14, %{{.*}} ], [ 20, %{{.*}} ], [ 9, %{{.*}} ]
+
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: noinline nounwind
+define i32 @foo(i32 %x) #0 section ".tcm_text" {
+entry:
+ %retval = alloca i32, align 4
+ %x.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ %0 = load i32, i32* %x.addr, align 4
+ switch i32 %0, label %sw.default [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 4, label %sw.bb4
+ i32 5, label %sw.bb5
+ ]
+
+sw.bb: ; preds = %entry
+ store i32 9, i32* %retval, align 4
+ br label %return
+
+sw.bb1: ; preds = %entry
+ store i32 20, i32* %retval, align 4
+ br label %return
+
+sw.bb2: ; preds = %entry
+ store i32 14, i32* %retval, align 4
+ br label %return
+
+sw.bb3: ; preds = %entry
+ store i32 22, i32* %retval, align 4
+ br label %return
+
+sw.bb4: ; preds = %entry
+ store i32 12, i32* %retval, align 4
+ br label %return
+
+sw.bb5: ; preds = %entry
+ store i32 5, i32* %retval, align 4
+ br label %return
+
+sw.default: ; preds = %entry
+ store i32 19, i32* %retval, align 4
+ br label %return
+
+return: ; preds = %sw.default, %sw.bb5, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
+ %1 = load i32, i32* %retval, align 4
+ ret i32 %1
+}
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
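@foo in the Hexagon test is the IR for a C switch along these lines (the return values are copied from the stores above; a sketch, since the original source isn't part of the test):

int foo(int x) {
  /* Dense cases 0..5: SimplifyCFG can turn this into a lookup table
     unless -hexagon-emit-lookup-tables=false disables it. */
  switch (x) {
  case 0: return 9;
  case 1: return 20;
  case 2: return 14;
  case 3: return 22;
  case 4: return 12;
  case 5: return 5;
  default: return 19;
  }
}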
diff --git a/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll b/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
index ae6ff6d10bcf0..e335c4078651f 100644
--- a/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin12.0.0"
; CHECK: entry:
; CHECK-NEXT: sub i3 %arg, -4
; CHECK-NEXT: zext i3 %switch.tableidx to i4
-; CHECK-NEXT: getelementptr inbounds [8 x i64], [8 x i64]* @switch.table, i32 0, i4 %switch.tableidx.zext
+; CHECK-NEXT: getelementptr inbounds [8 x i64], [8 x i64]* @switch.table.test, i32 0, i4 %switch.tableidx.zext
; CHECK-NEXT: load i64, i64* %switch.gep
; CHECK-NEXT: add i64
; CHECK-NEXT: ret i64
diff --git a/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll b/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
index 734312bc7285e..bd4e03cf91827 100644
--- a/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-apple-darwin12.0.0"
; CHECK: entry:
; CHECK-NEXT: sub i2 %0, -2
; CHECK-NEXT: zext i2 %switch.tableidx to i3
-; CHECK-NEXT: getelementptr inbounds [4 x i64], [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
+; CHECK-NEXT: getelementptr inbounds [4 x i64], [4 x i64]* @switch.table._TFO6reduce1E5toRawfS0_FT_Si, i32 0, i3 %switch.tableidx.zext
; CHECK-NEXT: load i64, i64* %switch.gep
; CHECK-NEXT: ret i64 %switch.load
define i64 @_TFO6reduce1E5toRawfS0_FT_Si(i2) {
diff --git a/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll b/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
index 4b9227b029eca..656a276969f39 100644
--- a/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
@@ -4,25 +4,25 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-unknown-linux-gnu"
; The table for @f
-; CHECK: @switch.table = private unnamed_addr constant [7 x i32] [i32 55, i32 123, i32 0, i32 -1, i32 27, i32 62, i32 1]
+; CHECK: @switch.table.f = private unnamed_addr constant [7 x i32] [i32 55, i32 123, i32 0, i32 -1, i32 27, i32 62, i32 1]
; The float table for @h
-; CHECK: @switch.table.1 = private unnamed_addr constant [4 x float] [float 0x40091EB860000000, float 0x3FF3BE76C0000000, float 0x4012449BA0000000, float 0x4001AE1480000000]
+; CHECK: @switch.table.h = private unnamed_addr constant [4 x float] [float 0x40091EB860000000, float 0x3FF3BE76C0000000, float 0x4012449BA0000000, float 0x4001AE1480000000]
; The table for @foostring
-; CHECK: @switch.table.2 = private unnamed_addr constant [4 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str1, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str2, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str3, i64 0, i64 0)]
+; CHECK: @switch.table.foostring = private unnamed_addr constant [4 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str1, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str2, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str3, i64 0, i64 0)]
; The table for @earlyreturncrash
-; CHECK: @switch.table.3 = private unnamed_addr constant [4 x i32] [i32 42, i32 9, i32 88, i32 5]
+; CHECK: @switch.table.earlyreturncrash = private unnamed_addr constant [4 x i32] [i32 42, i32 9, i32 88, i32 5]
-; The table for @large.
-; CHECK: @switch.table.4 = private unnamed_addr constant [199 x i32] [i32 1, i32 4, i32 9,
+; The table for @large
+; CHECK: @switch.table.large = private unnamed_addr constant [199 x i32] [i32 1, i32 4, i32 9,
; The table for @cprop
-; CHECK: @switch.table.5 = private unnamed_addr constant [7 x i32] [i32 5, i32 42, i32 126, i32 -452, i32 128, i32 6, i32 7]
+; CHECK: @switch.table.cprop = private unnamed_addr constant [7 x i32] [i32 5, i32 42, i32 126, i32 -452, i32 128, i32 6, i32 7]
; The table for @unreachable_case
-; CHECK: @switch.table.6 = private unnamed_addr constant [9 x i32] [i32 0, i32 0, i32 0, i32 2, i32 -1, i32 1, i32 1, i32 1, i32 1]
+; CHECK: @switch.table.unreachable_case = private unnamed_addr constant [9 x i32] [i32 0, i32 0, i32 0, i32 2, i32 -1, i32 1, i32 1, i32 1, i32 1]
; A simple int-to-int selection switch.
; It is dense enough to be replaced by table lookup.
@@ -58,7 +58,7 @@ return:
; CHECK-NEXT: %0 = icmp ult i32 %switch.tableidx, 7
; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table.f, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i32, i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
; CHECK: return:
@@ -97,7 +97,7 @@ sw.epilog:
; CHECK-NEXT: %switch.shiftamt = mul i32 %switch.tableidx, 8
; CHECK-NEXT: %switch.downshift = lshr i32 89655594, %switch.shiftamt
; CHECK-NEXT: %switch.masked = trunc i32 %switch.downshift to i8
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float], [4 x float]* @switch.table.1, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float], [4 x float]* @switch.table.h, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load float, float* %switch.gep
; CHECK-NEXT: br label %sw.epilog
; CHECK: sw.epilog:
@@ -144,7 +144,7 @@ return:
; CHECK-NEXT: %0 = icmp ult i32 %switch.tableidx, 4
; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*], [4 x i8*]* @switch.table.2, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*], [4 x i8*]* @switch.table.foostring, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i8*, i8** %switch.gep
; CHECK-NEXT: ret i8* %switch.load
}
@@ -173,7 +173,7 @@ sw.epilog:
; CHECK-LABEL: @earlyreturncrash(
; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.3, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.earlyreturncrash, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i32, i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
; CHECK: sw.epilog:
@@ -749,7 +749,7 @@ return:
; CHECK-LABEL: @cprop(
; CHECK: switch.lookup:
-; CHECK: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table.5, i32 0, i32 %switch.tableidx
+; CHECK: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table.cprop, i32 0, i32 %switch.tableidx
}
define i32 @unreachable_case(i32 %x) {
@@ -778,7 +778,7 @@ return:
; CHECK-LABEL: @unreachable_case(
; CHECK: switch.lookup:
-; CHECK: getelementptr inbounds [9 x i32], [9 x i32]* @switch.table.6, i32 0, i32 %switch.tableidx
+; CHECK: getelementptr inbounds [9 x i32], [9 x i32]* @switch.table.unreachable_case, i32 0, i32 %switch.tableidx
}
define i32 @unreachable_default(i32 %x) {
@@ -805,7 +805,7 @@ return:
; CHECK-NEXT: %switch.tableidx = sub i32 %x, 0
; CHECK-NOT: icmp
; CHECK-NOT: br i1
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.7, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.unreachable_default, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i32, i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
}
@@ -919,7 +919,7 @@ define i32 @threecases(i32 %c) {
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 3
; CHECK-NEXT: br i1 [[TMP0]], label %switch.lookup, label %return
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* @switch.table.10, i32 0, i32 [[SWITCH_TABLEIDX]]
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* @switch.table.threecases, i32 0, i32 [[SWITCH_TABLEIDX]]
; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]]
; CHECK-NEXT: ret i32 [[SWITCH_LOAD]]
; CHECK: return:
diff --git a/test/tools/llvm-cvtres/symbols.test b/test/tools/llvm-cvtres/symbols.test
new file mode 100644
index 0000000000000..2ca3a193ac404
--- /dev/null
+++ b/test/tools/llvm-cvtres/symbols.test
@@ -0,0 +1,33 @@
+// Check COFF emission of cvtres
+// The input was generated with the following command, using the original Windows
+// rc.exe:
+// > rc /fo test_resource.res /nologo test_resource.rc
+// The object file we are comparing against was generated with this command using
+// the original Windows cvtres.exe.
+// > cvtres /machine:X86 /readonly /nologo /out:test_resource.obj.coff \
+// test_resource.res
+
+RUN: llvm-cvtres /verbose /out:%t %p/Inputs/test_resource.res
+RUN: llvm-readobj -symbols %t | FileCheck %s
+
+CHECK: Name: $R000000
+CHECK-NEXT: Value: 0
+CHECK-NEXT: Section: .rsrc$02
+CHECK: Name: $R000018
+CHECK-NEXT: Value: 24
+CHECK-NEXT: Section: .rsrc$02
+CHECK: Name: $R000340
+CHECK-NEXT: Value: 832
+CHECK-NEXT: Section: .rsrc$02
+CHECK: Name: $R000668
+CHECK-NEXT: Value: 1640
+CHECK-NEXT: Section: .rsrc$02
+CHECK: Name: $R000698
+CHECK-NEXT: Value: 1688
+CHECK-NEXT: Section: .rsrc$02
+CHECK: Name: $R000720
+CHECK-NEXT: Value: 1824
+CHECK-NEXT: Section: .rsrc$02
+CHECK: Name: $R000750
+CHECK-NEXT: Value: 1872
+CHECK-NEXT: Section: .rsrc$02
diff --git a/test/tools/llvm-dwarfdump/X86/apple_names_verify_buckets.s b/test/tools/llvm-dwarfdump/X86/apple_names_verify_buckets.s
deleted file mode 100644
index 7b61a946281b3..0000000000000
--- a/test/tools/llvm-dwarfdump/X86/apple_names_verify_buckets.s
+++ /dev/null
@@ -1,192 +0,0 @@
-# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
-# RUN: | not llvm-dwarfdump -verify - \
-# RUN: | FileCheck %s
-
-# CHECK: Verifying .apple_names...
-# CHECK-NEXT: error: Bucket[0] has invalid hash index: [-2]
-
-# This test is meant to verify that the -verify option
-# in llvm-dwarfdump, correctly identifies
-# an invalid hash index for bucket[0] in the .apple_names section.
-
- .section __TEXT,__text,regular,pure_instructions
- .file 1 "basic.c"
- .comm _i,4,2 ## @i
- .section __DWARF,__debug_str,regular,debug
-Linfo_string:
- .asciz "basic.c" ## string offset=42
- .asciz "i" ## string offset=84
- .asciz "int" ## string offset=86
- .section __DWARF,__debug_loc,regular,debug
-Lsection_debug_loc:
- .section __DWARF,__debug_abbrev,regular,debug
-Lsection_abbrev:
- .byte 1 ## Abbreviation Code
- .byte 17 ## DW_TAG_compile_unit
- .byte 1 ## DW_CHILDREN_yes
- .byte 37 ## DW_AT_producer
- .byte 14 ## DW_FORM_strp
- .byte 19 ## DW_AT_language
- .byte 5 ## DW_FORM_data2
- .byte 3 ## DW_AT_name
- .byte 14 ## DW_FORM_strp
- .byte 16 ## DW_AT_stmt_list
- .byte 23 ## DW_FORM_sec_offset
- .byte 27 ## DW_AT_comp_dir
- .byte 14 ## DW_FORM_strp
- .byte 0 ## EOM(1)
- .byte 0 ## EOM(2)
- .byte 2 ## Abbreviation Code
- .byte 52 ## DW_TAG_variable
- .byte 0 ## DW_CHILDREN_no
- .byte 3 ## DW_AT_name
- .byte 14 ## DW_FORM_strp
- .byte 73 ## DW_AT_type
- .byte 19 ## DW_FORM_ref4
- .byte 63 ## DW_AT_external
- .byte 25 ## DW_FORM_flag_present
- .byte 58 ## DW_AT_decl_file
- .byte 11 ## DW_FORM_data1
- .byte 59 ## DW_AT_decl_line
- .byte 11 ## DW_FORM_data1
- .byte 2 ## DW_AT_location
- .byte 24 ## DW_FORM_exprloc
- .byte 0 ## EOM(1)
- .byte 0 ## EOM(2)
- .byte 3 ## Abbreviation Code
- .byte 36 ## DW_TAG_base_type
- .byte 0 ## DW_CHILDREN_no
- .byte 3 ## DW_AT_name
- .byte 14 ## DW_FORM_strp
- .byte 62 ## DW_AT_encoding
- .byte 11 ## DW_FORM_data1
- .byte 11 ## DW_AT_byte_size
- .byte 11 ## DW_FORM_data1
- .byte 0 ## EOM(1)
- .byte 0 ## EOM(2)
- .byte 0 ## EOM(3)
- .section __DWARF,__debug_info,regular,debug
-Lsection_info:
-Lcu_begin0:
- .long 55 ## Length of Unit
- .short 4 ## DWARF version number
-Lset0 = Lsection_abbrev-Lsection_abbrev ## Offset Into Abbrev. Section
- .long Lset0
- .byte 8 ## Address Size (in bytes)
- .byte 1 ## Abbrev [1] 0xb:0x30 DW_TAG_compile_unit
- .long 0 ## DW_AT_producer
- .short 12 ## DW_AT_language
- .long 42 ## DW_AT_name
-Lset1 = Lline_table_start0-Lsection_line ## DW_AT_stmt_list
- .long Lset1
- .long 50 ## DW_AT_comp_dir
- .byte 2 ## Abbrev [2] 0x1e:0x15 DW_TAG_variable
- .long 84 ## DW_AT_name
- .long 51 ## DW_AT_type
- ## DW_AT_external
- .byte 1 ## DW_AT_decl_file
- .byte 1 ## DW_AT_decl_line
- .byte 9 ## DW_AT_location
- .byte 3
- .quad _i
- .byte 3 ## Abbrev [3] 0x33:0x7 DW_TAG_base_type
- .long 86 ## DW_AT_name
- .byte 5 ## DW_AT_encoding
- .byte 4 ## DW_AT_byte_size
- .byte 0 ## End Of Children Mark
- .section __DWARF,__debug_ranges,regular,debug
-Ldebug_range:
- .section __DWARF,__debug_macinfo,regular,debug
-Ldebug_macinfo:
-Lcu_macro_begin0:
- .byte 0 ## End Of Macro List Mark
- .section __DWARF,__apple_names,regular,debug
-Lnames_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 1 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .long -2 ## Bucket 0 -- error: Bucket[0] has invalid hash index: [-2]
- .long 177678 ## Hash in Bucket 0
- .long LNames0-Lnames_begin ## Offset in Bucket 0
-LNames0:
- .long 84 ## i
- .long 1 ## Num DIEs
- .long 30
- .long 0
- .section __DWARF,__apple_objc,regular,debug
-Lobjc_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 0 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .long -1 ## Bucket 0
- .section __DWARF,__apple_namespac,regular,debug
-Lnamespac_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 0 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .long -1 ## Bucket 0
- .section __DWARF,__apple_types,regular,debug
-Ltypes_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 1 ## Header Hash Count
- .long 20 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 3 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .short 3 ## DW_ATOM_die_tag
- .short 5 ## DW_FORM_data2
- .short 4 ## DW_ATOM_type_flags
- .short 11 ## DW_FORM_data1
- .long 0 ## Bucket 0
- .long 193495088 ## Hash in Bucket 0
- .long Ltypes0-Ltypes_begin ## Offset in Bucket 0
-Ltypes0:
- .long 86 ## int
- .long 1 ## Num DIEs
- .long 51
- .short 36
- .byte 0
- .long 0
- .section __DWARF,__apple_exttypes,regular,debug
-Lexttypes_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 0 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 7 ## DW_ATOM_ext_types
- .short 6 ## DW_FORM_data4
- .long -1 ## Bucket 0
-
-.subsections_via_symbols
- .section __DWARF,__debug_line,regular,debug
-Lsection_line:
-Lline_table_start0:
diff --git a/test/tools/llvm-dwarfdump/X86/apple_names_verify_data.s b/test/tools/llvm-dwarfdump/X86/apple_names_verify_data.s
new file mode 100644
index 0000000000000..6d548543e4b9a
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/apple_names_verify_data.s
@@ -0,0 +1,64 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
+# RUN: | not llvm-dwarfdump -verify - \
+# RUN: | FileCheck %s
+
+# CHECK: Verifying .apple_names...
+# CHECK-NEXT: error: Bucket[0] has invalid hash index: 4294967294
+# CHECK-NEXT: error: Hash[0] has invalid HashData offset: 0x000000b4
+# CHECK-NEXT: error: .apple_names Bucket[1] Hash[1] = 0x0002b60f Str[0] = 0x0000005a DIE[0] = 0x00000001 is not a valid DIE offset for "j".
+
+# This test is meant to verify that the -verify option
+# in llvm-dwarfdump correctly identifies
+# an invalid hash index for bucket[0] in the .apple_names section,
+# an invalid HashData offset for Hash[0], as well as
+# an invalid DIE offset in the .debug_info section.
+# We read an invalid DIE because the DIE's DW_FORM is misinterpreted:
+# Atom[0].form is DW_FORM_flag_present instead of DW_FORM_data4.
+
+ .section __TEXT,__text,regular,pure_instructions
+ .file 1 "basic.c"
+ .comm _i,4,2 ## @i
+ .comm _j,4,2 ## @j
+ .section __DWARF,__debug_str,regular,debug
+Linfo_string:
+ .asciz "Apple LLVM version 8.1.0 (clang-802.0.35)" ## string offset=0
+ .asciz "basic.c" ## string offset=42
+ .asciz "/Users/sgravani/Development/tests" ## string offset=50
+ .asciz "i" ## string offset=84
+ .asciz "int" ## string offset=86
+ .asciz "j" ## string offset=90
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+ .section __DWARF,__apple_names,regular,debug
+Lnames_begin:
+ .long 1212240712 ## Header Magic
+ .short 1 ## Header Version
+ .short 0 ## Header Hash Function
+ .long 2 ## Header Bucket Count
+ .long 2 ## Header Hash Count
+ .long 12 ## Header Data Length
+ .long 0 ## HeaderData Die Offset Base
+ .long 1 ## HeaderData Atom Count
+ .short 1 ## DW_ATOM_die_offset
+ .short 25                      ## DW_FORM_flag_present (should be DW_FORM_data4) -- error: .apple_names Bucket[1] Hash[1] = 0x0002b60f Str[0] = 0x0000005a DIE[0] = 0x00000001 is not a valid DIE offset for "j".
+ .long -2 ## Bucket 0 -- error: Bucket[0] has invalid hash index: 4294967294
+ .long 1 ## Bucket 1
+ .long 177678 ## Hash in Bucket 0
+ .long 177679 ## Hash in Bucket 1
+ .long Lsection_line ## Offset in Bucket 0 -- error: Hash[0] has invalid HashData offset: 0x000000b4
+ .long LNames1-Lnames_begin ## Offset in Bucket 1
+LNames0:
+ .long 84 ## i
+ .long 1 ## Num DIEs
+ .long 30
+ .long 0
+LNames1:
+ .long 90 ## j
+ .long 1 ## Num DIEs
+ .long 58
+ .long 0
+
+.subsections_via_symbols
+ .section __DWARF,__debug_line,regular,debug
+Lsection_line:
+Lline_table_start0:
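The .apple_names header that these verifier tests hand-assemble has the fixed field order shown by the directives above; a C sketch of that layout (descriptive field names, not LLVM's actual struct):

#include <stdint.h>

struct AppleNamesHeader {
  uint32_t magic;           /* 1212240712 == 'HASH'                  */
  uint16_t version;         /* 1                                     */
  uint16_t hash_function;   /* 0                                     */
  uint32_t bucket_count;    /* hash-index buckets follow the atoms   */
  uint32_t hash_count;      /* hashes, then per-bucket data offsets  */
  uint32_t header_data_len;
  uint32_t die_offset_base;
  uint32_t atom_count;      /* each atom is a (type, DW_FORM) pair   */
};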
diff --git a/test/tools/llvm-dwarfdump/X86/apple_names_verify_form.s b/test/tools/llvm-dwarfdump/X86/apple_names_verify_form.s
new file mode 100644
index 0000000000000..ed4bf57069ced
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/apple_names_verify_form.s
@@ -0,0 +1,58 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
+# RUN: | not llvm-dwarfdump -verify - \
+# RUN: | FileCheck %s
+
+# CHECK: Verifying .apple_names...
+# CHECK-NEXT: error: unsupported form; failed to read HashData
+
+# This test is meant to verify that the -verify option
+# in llvm-dwarfdump correctly identifies that Atom[0].form is unsupported.
+# As a result, the HashData cannot be read.
+
+ .section __TEXT,__text,regular,pure_instructions
+ .file 1 "basic.c"
+ .comm _i,4,2 ## @i
+ .comm _j,4,2 ## @j
+ .section __DWARF,__debug_str,regular,debug
+Linfo_string:
+ .asciz "Apple LLVM version 8.1.0 (clang-802.0.35)" ## string offset=0
+ .asciz "basic.c" ## string offset=42
+ .asciz "/Users/sgravani/Development/tests" ## string offset=50
+ .asciz "i" ## string offset=84
+ .asciz "int" ## string offset=86
+ .asciz "j" ## string offset=90
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+ .section __DWARF,__apple_names,regular,debug
+Lnames_begin:
+ .long 1212240712 ## Header Magic
+ .short 1 ## Header Version
+ .short 0 ## Header Hash Function
+ .long 2 ## Header Bucket Count
+ .long 2 ## Header Hash Count
+ .long 12 ## Header Data Length
+ .long 0 ## HeaderData Die Offset Base
+ .long 1 ## HeaderData Atom Count
+ .short 1 ## DW_ATOM_die_offset
+ .short 400                     ## invalid DW_FORM (DW_FORM_data4 expected) -- error: unsupported form; failed to read HashData
+ .long 0 ## Bucket 0
+ .long 1 ## Bucket 1
+ .long 177678 ## Hash in Bucket 0
+ .long 177679 ## Hash in Bucket 1
+ .long LNames0-Lnames_begin ## Offset in Bucket 0
+ .long LNames1-Lnames_begin ## Offset in Bucket 1
+LNames0:
+ .long 84 ## i
+ .long 1 ## Num DIEs
+ .long 30
+ .long 0
+LNames1:
+ .long 90 ## j
+ .long 1 ## Num DIEs
+ .long 58
+ .long 0
+
+.subsections_via_symbols
+ .section __DWARF,__debug_line,regular,debug
+Lsection_line:
+Lline_table_start0:
diff --git a/test/tools/llvm-dwarfdump/X86/apple_names_verify_num_atoms.s b/test/tools/llvm-dwarfdump/X86/apple_names_verify_num_atoms.s
new file mode 100644
index 0000000000000..dffb39c20f085
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/apple_names_verify_num_atoms.s
@@ -0,0 +1,59 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
+# RUN: | not llvm-dwarfdump -verify - \
+# RUN: | FileCheck %s
+
+# CHECK: Verifying .apple_names...
+# CHECK-NEXT: error: no atoms; failed to read HashData
+
+# This test is meant to verify that the -verify option
+# in llvm-dwarfdump correctly identifies that there are no atoms.
+# As a result, the HashData cannot be read.
+
+ .section __TEXT,__text,regular,pure_instructions
+ .file 1 "basic.c"
+ .comm _i,4,2 ## @i
+ .comm _j,4,2 ## @j
+ .section __DWARF,__debug_str,regular,debug
+Linfo_string:
+ .asciz "Apple LLVM version 8.1.0 (clang-802.0.35)" ## string offset=0
+ .asciz "basic.c" ## string offset=42
+ .asciz "/Users/sgravani/Development/tests" ## string offset=50
+ .asciz "i" ## string offset=84
+ .asciz "int" ## string offset=86
+ .asciz "j" ## string offset=90
+
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+ .section __DWARF,__apple_names,regular,debug
+Lnames_begin:
+ .long 1212240712 ## Header Magic
+ .short 1 ## Header Version
+ .short 0 ## Header Hash Function
+ .long 2 ## Header Bucket Count
+ .long 2 ## Header Hash Count
+ .long 12 ## Header Data Length
+ .long 0 ## HeaderData Die Offset Base
+ .long 0 ## HeaderData Atom Count -- error: no atoms; failed to read HashData
+ .short 1 ## DW_ATOM_die_offset
+ .short 6 ## DW_FORM_data4
+ .long 0 ## Bucket 0
+ .long 1 ## Bucket 1
+ .long 177678 ## Hash in Bucket 0
+ .long 177679 ## Hash in Bucket 1
+ .long LNames0-Lnames_begin ## Offset in Bucket 0
+ .long LNames1-Lnames_begin ## Offset in Bucket 1
+LNames0:
+ .long 84 ## i
+ .long 1 ## Num DIEs
+ .long 30
+ .long 0
+LNames1:
+ .long 90 ## j
+ .long 1 ## Num DIEs
+ .long 58
+ .long 0
+
+.subsections_via_symbols
+ .section __DWARF,__debug_line,regular,debug
+Lsection_line:
+Lline_table_start0:
diff --git a/test/tools/llvm-dwarfdump/X86/no_apple_names_verify.s b/test/tools/llvm-dwarfdump/X86/no_apple_names_verify.s
new file mode 100644
index 0000000000000..76606bd40adde
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/no_apple_names_verify.s
@@ -0,0 +1,33 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
+# RUN: | llvm-dwarfdump -verify - \
+# RUN: | FileCheck %s
+
+# CHECK-NOT: Verifying .apple_names...
+
+# This test is meant to verify that the -verify option
+# in llvm-dwarfdump doesn't produce any .apple_names-related
+# output when there's no such section in the object.
+# The test was manually modified to exclude the
+# .apple_names section from the apple_names_verify_num_atoms.s
+# test file in the same directory.
+
+ .section __TEXT,__text,regular,pure_instructions
+ .file 1 "basic.c"
+ .comm _i,4,2 ## @i
+ .comm _j,4,2 ## @j
+ .section __DWARF,__debug_str,regular,debug
+Linfo_string:
+ .asciz "Apple LLVM version 8.1.0 (clang-802.0.35)" ## string offset=0
+ .asciz "basic.c" ## string offset=42
+ .asciz "/Users/sgravani/Development/tests" ## string offset=50
+ .asciz "i" ## string offset=84
+ .asciz "int" ## string offset=86
+ .asciz "j" ## string offset=90
+
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+
+.subsections_via_symbols
+ .section __DWARF,__debug_line,regular,debug
+Lsection_line:
+Lline_table_start0:
diff --git a/test/tools/llvm-dwarfdump/X86/no_apple_names_verify_buckets.s b/test/tools/llvm-dwarfdump/X86/no_apple_names_verify_buckets.s
deleted file mode 100644
index 472ff71794c66..0000000000000
--- a/test/tools/llvm-dwarfdump/X86/no_apple_names_verify_buckets.s
+++ /dev/null
@@ -1,174 +0,0 @@
-# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
-# RUN: | not llvm-dwarfdump -verify - \
-# RUN: | FileCheck %s
-
-# CHECK-NOT: Verifying .apple_names...
-
-# This test is meant to verify that the -verify option
-# in llvm-dwarfdump doesn't produce any .apple_names related
-# output when there's no such section int he object.
-# The test was manually modified to exclude the
-# .apple_names section from the apple_names_verify_buckets.s
-# test file in the same directory.
-
- .section __TEXT,__text,regular,pure_instructions
- .file 1 "basic.c"
- .comm _i,4,2 ## @i
- .section __DWARF,__debug_str,regular,debug
-Linfo_string:
- .asciz "basic.c" ## string offset=42
- .asciz "i" ## string offset=84
- .asciz "int" ## string offset=86
- .section __DWARF,__debug_loc,regular,debug
-Lsection_debug_loc:
- .section __DWARF,__debug_abbrev,regular,debug
-Lsection_abbrev:
- .byte 1 ## Abbreviation Code
- .byte 17 ## DW_TAG_compile_unit
- .byte 1 ## DW_CHILDREN_yes
- .byte 37 ## DW_AT_producer
- .byte 14 ## DW_FORM_strp
- .byte 19 ## DW_AT_language
- .byte 5 ## DW_FORM_data2
- .byte 3 ## DW_AT_name
- .byte 14 ## DW_FORM_strp
- .byte 16 ## DW_AT_stmt_list
- .byte 23 ## DW_FORM_sec_offset
- .byte 27 ## DW_AT_comp_dir
- .byte 14 ## DW_FORM_strp
- .byte 0 ## EOM(1)
- .byte 0 ## EOM(2)
- .byte 2 ## Abbreviation Code
- .byte 52 ## DW_TAG_variable
- .byte 0 ## DW_CHILDREN_no
- .byte 3 ## DW_AT_name
- .byte 14 ## DW_FORM_strp
- .byte 73 ## DW_AT_type
- .byte 19 ## DW_FORM_ref4
- .byte 63 ## DW_AT_external
- .byte 25 ## DW_FORM_flag_present
- .byte 58 ## DW_AT_decl_file
- .byte 11 ## DW_FORM_data1
- .byte 59 ## DW_AT_decl_line
- .byte 11 ## DW_FORM_data1
- .byte 2 ## DW_AT_location
- .byte 24 ## DW_FORM_exprloc
- .byte 0 ## EOM(1)
- .byte 0 ## EOM(2)
- .byte 3 ## Abbreviation Code
- .byte 36 ## DW_TAG_base_type
- .byte 0 ## DW_CHILDREN_no
- .byte 3 ## DW_AT_name
- .byte 14 ## DW_FORM_strp
- .byte 62 ## DW_AT_encoding
- .byte 11 ## DW_FORM_data1
- .byte 11 ## DW_AT_byte_size
- .byte 11 ## DW_FORM_data1
- .byte 0 ## EOM(1)
- .byte 0 ## EOM(2)
- .byte 0 ## EOM(3)
- .section __DWARF,__debug_info,regular,debug
-Lsection_info:
-Lcu_begin0:
- .long 55 ## Length of Unit
- .short 4 ## DWARF version number
-Lset0 = Lsection_abbrev-Lsection_abbrev ## Offset Into Abbrev. Section
- .long Lset0
- .byte 8 ## Address Size (in bytes)
- .byte 1 ## Abbrev [1] 0xb:0x30 DW_TAG_compile_unit
- .long 0 ## DW_AT_producer
- .short 12 ## DW_AT_language
- .long 42 ## DW_AT_name
-Lset1 = Lline_table_start0-Lsection_line ## DW_AT_stmt_list
- .long Lset1
- .long 50 ## DW_AT_comp_dir
- .byte 2 ## Abbrev [2] 0x1e:0x15 DW_TAG_variable
- .long 84 ## DW_AT_name
- .long 51 ## DW_AT_type
- ## DW_AT_external
- .byte 1 ## DW_AT_decl_file
- .byte 1 ## DW_AT_decl_line
- .byte 9 ## DW_AT_location
- .byte 3
- .quad _i
- .byte 3 ## Abbrev [3] 0x33:0x7 DW_TAG_base_type
- .long 86 ## DW_AT_name
- .byte 5 ## DW_AT_encoding
- .byte 4 ## DW_AT_byte_size
- .byte 0 ## End Of Children Mark
- .section __DWARF,__debug_ranges,regular,debug
-Ldebug_range:
- .section __DWARF,__debug_macinfo,regular,debug
-Ldebug_macinfo:
-Lcu_macro_begin0:
- .byte 0 ## End Of Macro List Mark
- .section __DWARF,__apple_objc,regular,debug
-Lobjc_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 0 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .long -1 ## Bucket 0
- .section __DWARF,__apple_namespac,regular,debug
-Lnamespac_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 0 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .long -1 ## Bucket 0
- .section __DWARF,__apple_types,regular,debug
-Ltypes_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 1 ## Header Hash Count
- .long 20 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 3 ## HeaderData Atom Count
- .short 1 ## DW_ATOM_die_offset
- .short 6 ## DW_FORM_data4
- .short 3 ## DW_ATOM_die_tag
- .short 5 ## DW_FORM_data2
- .short 4 ## DW_ATOM_type_flags
- .short 11 ## DW_FORM_data1
- .long 0 ## Bucket 0
- .long 193495088 ## Hash in Bucket 0
- .long Ltypes0-Ltypes_begin ## Offset in Bucket 0
-Ltypes0:
- .long 86 ## int
- .long 1 ## Num DIEs
- .long 51
- .short 36
- .byte 0
- .long 0
- .section __DWARF,__apple_exttypes,regular,debug
-Lexttypes_begin:
- .long 1212240712 ## Header Magic
- .short 1 ## Header Version
- .short 0 ## Header Hash Function
- .long 1 ## Header Bucket Count
- .long 0 ## Header Hash Count
- .long 12 ## Header Data Length
- .long 0 ## HeaderData Die Offset Base
- .long 1 ## HeaderData Atom Count
- .short 7 ## DW_ATOM_ext_types
- .short 6 ## DW_FORM_data4
- .long -1 ## Bucket 0
-
-.subsections_via_symbols
- .section __DWARF,__debug_line,regular,debug
-Lsection_line:
-Lline_table_start0:
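The deleted lines above are the tail of a hand-written Mach-O DWARF fixture, including the __apple_* accelerator tables. Those tables hash entry names with the Bernstein (djb2) function, and the "Hash in Bucket 0" value 193495088 in the __apple_types section is exactly djb2("int"), the one type the fixture defines. A minimal sketch reproducing that hash:

#include <cstdint>
#include <cstdio>

// djb2 (Bernstein) hash, as used by the __apple_* accelerator tables above.
uint32_t djbHash(const char *S) {
  uint32_t H = 5381;
  for (; *S; ++S)
    H = H * 33 + static_cast<uint8_t>(*S);
  return H;
}

int main() {
  std::printf("%u\n", djbHash("int")); // 193495088, the "Hash in Bucket 0"
  return 0;
}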
diff --git a/test/tools/llvm-nm/X86/demangle.ll b/test/tools/llvm-nm/X86/demangle.ll
new file mode 100644
index 0000000000000..283e604046a8c
--- /dev/null
+++ b/test/tools/llvm-nm/X86/demangle.ll
@@ -0,0 +1,37 @@
+; RUN: llc -filetype=obj -mtriple=x86_64-pc-linux -o %t.o %s
+; RUN: llvm-nm %t.o | FileCheck --check-prefix="MANGLED" %s
+; RUN: llvm-nm -C %t.o | FileCheck --check-prefix="DEMANGLED" %s
+; RUN: llvm-nm --demangle %t.o | FileCheck --check-prefix="DEMANGLED" %s
+
+; RUN: llc -filetype=obj -mtriple=x86_64-apple-darwin9 -o %t.macho %s
+; RUN: llvm-nm %t.macho | FileCheck --check-prefix="MACHO-MANGLED" %s
+; RUN: llvm-nm -C %t.macho | FileCheck --check-prefix="DEMANGLED" %s
+
+; RUN: llc -filetype=obj -mtriple=x86_64-pc-win32 -o %t.coff %s
+; RUN: llvm-nm %t.coff | FileCheck --check-prefix="COFF-MANGLED" %s
+; RUN: llvm-nm -C %t.coff | FileCheck --check-prefix="COFF-DEMANGLED" %s
+
+define i32 @_Z3fooi(i32) #0 {
+entry:
+ ret i32 1
+}
+
+define float @_Z3barf(float) #0 {
+entry:
+ ret float 0.000000e+00
+}
+
+; MANGLED: 0000000000000010 T _Z3barf
+; MANGLED: 0000000000000000 T _Z3fooi
+
+; MACHO-MANGLED: 0000000000000010 T __Z3barf
+; MACHO-MANGLED: 0000000000000000 T __Z3fooi
+
+; COFF-MANGLED: 00000010 T _Z3barf
+; COFF-MANGLED: 00000000 T _Z3fooi
+
+; DEMANGLED: 0000000000000010 T bar(float)
+; DEMANGLED: 0000000000000000 T foo(int)
+
+; COFF-DEMANGLED: 00000010 T bar(float)
+; COFF-DEMANGLED: 00000000 T foo(int)
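The new test pins down llvm-nm's -C/--demangle behavior for Itanium-mangled names across ELF, Mach-O (note the extra leading underscore Mach-O adds to _Z3fooi, which the demangled run still resolves to foo(int)), and COFF. A minimal sketch of the demangling step itself, using the Itanium ABI entry point from <cxxabi.h> for illustration rather than LLVM's internal demangler:

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int Status = 0;
  // "_Z3fooi" is the mangled name defined in the test above.
  char *Name = abi::__cxa_demangle("_Z3fooi", nullptr, nullptr, &Status);
  if (Status == 0 && Name) {
    std::printf("%s\n", Name); // prints "foo(int)", as the DEMANGLED lines expect
    std::free(Name);
  }
  return 0;
}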
diff --git a/test/tools/llvm-nm/wasm/weak-symbols.yaml b/test/tools/llvm-nm/wasm/weak-symbols.yaml
index 682a874ea5905..d46ca1afe8ead 100644
--- a/test/tools/llvm-nm/wasm/weak-symbols.yaml
+++ b/test/tools/llvm-nm/wasm/weak-symbols.yaml
@@ -33,6 +33,8 @@ Sections:
Index: 0x00000002
- Type: CUSTOM
Name: linking
+ DataSize: 0
+ DataAlignment: 2
SymbolInfo:
- Name: weak_global_func
Flags: 1
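The two added YAML keys spell out fields of the wasm "linking" custom section that the mapping now requires alongside the per-symbol info. A rough sketch of the corresponding record, with illustrative field names that are assumptions here, not the exact llvm::wasm definitions:

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the wasm "linking" section fields added above.
struct WasmLinkingSection {
  uint32_t DataSize;      // total byte size of the data segments
  uint32_t DataAlignment; // required alignment, 2 in the updated test
};

int main() {
  WasmLinkingSection L = {0, 2};
  std::printf("DataSize: %u, DataAlignment: %u\n", L.DataSize, L.DataAlignment);
  return 0;
}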
diff --git a/test/tools/llvm-objdump/ARM/invalid-instruction.s b/test/tools/llvm-objdump/ARM/invalid-instruction.s
new file mode 100644
index 0000000000000..a63300cadffea
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/invalid-instruction.s
@@ -0,0 +1,9 @@
+@RUN: llvm-mc -triple arm-unknown-linux -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+
+.text
+ b l0
+ .inst 0xffffffff
+l0:
+
+@CHECK: 0: 00 00 00 ea b #0 <l0>
+@CHECK-NEXT: 4: ff ff ff ff <unknown>
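This test fixes llvm-objdump's fallback when the ARM decoder rejects an encoding: the raw bytes are printed with <unknown> and disassembly resumes at the next word instead of aborting. A minimal sketch of that loop, where decode() is a hypothetical stand-in for the real MCDisassembler query and the accepted word is printed with its known text for brevity:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in decoder: 0xffffffff is not a valid ARM encoding;
// the only other word in this toy input is the valid branch.
bool decode(uint32_t Word) { return Word != 0xffffffffu; }

int main() {
  // "b #0 <l0>" (00 00 00 ea little-endian) then the invalid .inst word.
  std::vector<uint32_t> Text = {0xea000000, 0xffffffff};
  for (size_t I = 0; I < Text.size(); ++I) {
    if (decode(Text[I]))
      std::printf("%zx: %08x  b #0 <l0>\n", I * 4, Text[I]);
    else
      std::printf("%zx: %08x  <unknown>\n", I * 4, Text[I]); // keep going
  }
  return 0;
}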
diff --git a/test/tools/llvm-objdump/WebAssembly/lit.local.cfg b/test/tools/llvm-objdump/WebAssembly/lit.local.cfg
new file mode 100644
index 0000000000000..0dd8c920ff1eb
--- /dev/null
+++ b/test/tools/llvm-objdump/WebAssembly/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'WebAssembly' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/tools/llvm-objdump/WebAssembly/relocations.test b/test/tools/llvm-objdump/WebAssembly/relocations.test
new file mode 100644
index 0000000000000..07a167c550f97
--- /dev/null
+++ b/test/tools/llvm-objdump/WebAssembly/relocations.test
@@ -0,0 +1,8 @@
+; RUN: llc -mtriple=wasm32-unknown-unknown-wasm -filetype=obj %s -o - | llvm-objdump -r - | FileCheck %s
+
+@foo1 = hidden global i32 1, align 4
+@foo2 = hidden global i32 1, align 4
+@bar = hidden global i32* @foo2, align 4
+
+; CHECK: RELOCATION RECORDS FOR [DATA]:
+; CHECK-NEXT: 0000000e R_WEBASSEMBLY_GLOBAL_ADDR_I32 1+0
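Each line llvm-objdump -r prints for wasm is an offset, a relocation type, and a symbol index plus addend: here @bar's initializer at offset 0xe needs the address of @foo2 (index 1) with addend 0. A rough sketch of the record behind that line; the field names and the numeric type value are assumptions for illustration:

#include <cstdint>
#include <cstdio>

// Illustrative wasm relocation entry, not the exact llvm::wasm layout.
struct WasmReloc {
  uint32_t Offset; // where in the DATA section the fixup applies
  uint32_t Type;   // numeric value below is an assumed placeholder
  uint32_t Index;  // symbol index (1 == foo2 in the test above)
  int32_t Addend;
};

int main() {
  WasmReloc R = {0x0e, /*GLOBAL_ADDR_I32*/ 5, 1, 0};
  std::printf("%08x R_WEBASSEMBLY_GLOBAL_ADDR_I32 %u+%d\n", R.Offset, R.Index,
              R.Addend);
  return 0;
}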
diff --git a/test/tools/llvm-pdbdump/partial-type-stream.test b/test/tools/llvm-pdbdump/partial-type-stream.test
new file mode 100644
index 0000000000000..3a853c3914506
--- /dev/null
+++ b/test/tools/llvm-pdbdump/partial-type-stream.test
@@ -0,0 +1,30 @@
+; RUN: llvm-pdbutil dump -type-index=0x1019 %p/Inputs/ClassLayoutTest.pdb \
+; RUN: | FileCheck --check-prefix=NODEPS %s
+; RUN: llvm-pdbutil dump -type-index=0x1019 -dependents %p/Inputs/ClassLayoutTest.pdb \
+; RUN: | FileCheck --check-prefix=DEPS %s
+
+
+NODEPS: Types (TPI Stream)
+NODEPS-NEXT: ============================================================
+NODEPS-NEXT: Showing 1 records.
+NODEPS-NEXT: 0x1019 | LF_MFUNCTION [size = 28]
+NODEPS-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100E
+NODEPS-NEXT: class type = 0x1017, this type = 0x1018, this adjust = 0
+NODEPS-NEXT: calling conv = thiscall, options = None
+
+
+DEPS: Types (TPI Stream)
+DEPS-NEXT: ============================================================
+DEPS-NEXT: Showing 1 records and their dependents (4 records total)
+DEPS-NEXT: 0x100E | LF_ARGLIST [size = 8]
+DEPS-NEXT: 0x1017 | LF_CLASS [size = 60]
+DEPS-NEXT: class name: `MembersTest::A`
+DEPS-NEXT: unique name: `.?AVA@MembersTest@@`
+DEPS-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
+DEPS-NEXT: options: forward ref | has unique name
+DEPS-NEXT: 0x1018 | LF_POINTER [size = 12]
+DEPS-NEXT: referent = 0x1017, mode = pointer, opts = const, kind = ptr32
+DEPS-NEXT: 0x1019 | LF_MFUNCTION [size = 28]
+DEPS-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x100E
+DEPS-NEXT: class type = 0x1017, this type = 0x1018, this adjust = 0
+DEPS-NEXT: calling conv = thiscall, options = None
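The -dependents mode walks the type graph transitively: starting from 0x1019, the LF_MFUNCTION pulls in its argument list (0x100E), class (0x1017), and this-pointer type (0x1018), giving the "4 records total" the DEPS run checks. A minimal sketch of that closure over stand-in dependency edges mirroring the records above:

#include <cstdint>
#include <cstdio>
#include <map>
#include <set>
#include <vector>

int main() {
  // Stand-in edges copied from the records printed by the test.
  std::map<uint32_t, std::vector<uint32_t>> Deps = {
      {0x1019, {0x100E, 0x1017, 0x1018}}, // LF_MFUNCTION: arglist, class, this
      {0x1018, {0x1017}},                 // LF_POINTER: its referent
      {0x1017, {}},                       // LF_CLASS (forward ref)
      {0x100E, {}},                       // LF_ARGLIST (empty)
  };
  std::set<uint32_t> Seen;
  std::vector<uint32_t> Work = {0x1019};
  while (!Work.empty()) {
    uint32_t TI = Work.back();
    Work.pop_back();
    if (!Seen.insert(TI).second)
      continue; // already visited
    for (uint32_t D : Deps[TI])
      Work.push_back(D);
  }
  std::printf("%zu records total\n", Seen.size()); // 4, matching the DEPS line
  return 0;
}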
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm64 b/test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm64
new file mode 100644
index 0000000000000..b494f6ade24c8
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm64
Binary files differ
diff --git a/test/tools/llvm-readobj/file-headers.test b/test/tools/llvm-readobj/file-headers.test
index 47fb24de1b603..6bc9714f2037e 100644
--- a/test/tools/llvm-readobj/file-headers.test
+++ b/test/tools/llvm-readobj/file-headers.test
@@ -1,5 +1,7 @@
RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-arm \
RUN: | FileCheck %s -check-prefix COFF-ARM
+RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-arm64 \
+RUN: | FileCheck %s -check-prefix COFF-ARM64
RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-i386 \
RUN: | FileCheck %s -check-prefix COFF32
RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-x86-64 \
@@ -47,6 +49,21 @@ COFF-ARM-NEXT: Characteristics [ (0x0)
COFF-ARM-NEXT: ]
COFF-ARM-NEXT: }
+COFF-ARM64: File: {{(.*[/\\])?}}trivial.obj.coff-arm64
+COFF-ARM64-NEXT: Format: COFF-ARM64
+COFF-ARM64-NEXT: Arch: aarch64
+COFF-ARM64-NEXT: AddressSize: 64bit
+COFF-ARM64-NEXT: ImageFileHeader {
+COFF-ARM64-NEXT: Machine: IMAGE_FILE_MACHINE_ARM64 (0xAA64)
+COFF-ARM64-NEXT: SectionCount: 1
+COFF-ARM64-NEXT: TimeDateStamp: 1970-01-01 00:00:00 (0x0)
+COFF-ARM64-NEXT: PointerToSymbolTable: 0x44
+COFF-ARM64-NEXT: SymbolCount: 3
+COFF-ARM64-NEXT: OptionalHeaderSize: 0
+COFF-ARM64-NEXT: Characteristics [ (0x0)
+COFF-ARM64-NEXT: ]
+COFF-ARM64-NEXT: }
+
COFF32: File: {{(.*[/\\])?}}trivial.obj.coff-i386
COFF32-NEXT: Format: COFF-i386
COFF32-NEXT: Arch: i386
@@ -238,6 +255,7 @@ PE32-NEXT: IMAGE_FILE_EXECUTABLE_IMAGE (0x2)
PE32-NEXT: ]
PE32-NEXT: }
PE32-NEXT: ImageOptionalHeader {
+PE32-NEXT: Magic: 0x10B
PE32-NEXT: MajorLinkerVersion: 11
PE32-NEXT: MinorLinkerVersion: 0
PE32-NEXT: SizeOfCode: 512
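Both this hunk and the peplus.test hunk below add a check for the optional header's Magic field, which is what distinguishes the two layouts: 0x10B marks PE32 and 0x20B marks PE32+. A minimal sketch of the dispatch a COFF reader performs on that field:

#include <cstdint>
#include <cstdio>

const char *optionalHeaderKind(uint16_t Magic) {
  switch (Magic) {
  case 0x10B: return "PE32";  // 32-bit layout, checked in file-headers.test
  case 0x20B: return "PE32+"; // 64-bit layout, checked in peplus.test
  default:    return "unknown";
  }
}

int main() {
  std::printf("%s %s\n", optionalHeaderKind(0x10B), optionalHeaderKind(0x20B));
  return 0;
}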
diff --git a/test/tools/llvm-readobj/peplus.test b/test/tools/llvm-readobj/peplus.test
index 4d8d25db894c9..3619cd792d54a 100644
--- a/test/tools/llvm-readobj/peplus.test
+++ b/test/tools/llvm-readobj/peplus.test
@@ -16,6 +16,7 @@ CHECK: IMAGE_FILE_LARGE_ADDRESS_AWARE (0x20)
CHECK: ]
CHECK: }
CHECK: ImageOptionalHeader {
+CHECK: Magic: 0x20B
CHECK: MajorLinkerVersion: 11
CHECK: MinorLinkerVersion: 0
CHECK: SizeOfCode: 512
diff --git a/test/tools/llvm-readobj/symbols.test b/test/tools/llvm-readobj/symbols.test
index 1037c28660238..da8a70b031aba 100644
--- a/test/tools/llvm-readobj/symbols.test
+++ b/test/tools/llvm-readobj/symbols.test
@@ -75,17 +75,21 @@ WASM: Symbols [
WASM-NEXT: Symbol {
WASM-NEXT: Name: bar
WASM-NEXT: Type: GLOBAL_IMPORT (0x2)
+WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: Symbol {
WASM-NEXT: Name: baz
WASM-NEXT: Type: GLOBAL_IMPORT (0x2)
+WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: Symbol {
WASM-NEXT: Name: foo
WASM-NEXT: Type: FUNCTION_EXPORT (0x1)
+WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: Symbol {
WASM-NEXT: Name: foo
WASM-NEXT: Type: DEBUG_FUNCTION_NAME (0x4)
+WASM-NEXT: Flags: 0x0
WASM-NEXT: }
WASM-NEXT: ]
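The updated checks show llvm-readobj now printing a Flags word for every wasm symbol; 0x0 means no flags are set, while the weak symbols in weak-symbols.yaml earlier in this patch carry Flags: 1. A small sketch of testing such a flag bit, assuming for illustration that the weak binding lives in bit 0:

#include <cstdint>
#include <cstdio>

constexpr uint32_t SymbolFlagWeak = 0x1; // assumed bit, matching "Flags: 1"

int main() {
  uint32_t Flags = 0x0; // as printed for bar/baz/foo in the updated test
  std::printf("Flags: 0x%X%s\n", Flags,
              (Flags & SymbolFlagWeak) ? " (weak)" : "");
  return 0;
}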