Diffstat (limited to 'test')
-rw-r--r--  test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll | 70
-rw-r--r--  test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll | 58
-rw-r--r--  test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll | 540
-rw-r--r--  test/Analysis/ValueTracking/memory-dereferenceable.ll | 13
-rw-r--r--  test/Assembler/dicompileunit.ll (renamed from test/Assembler/mdcompileunit.ll) | 0
-rw-r--r--  test/Assembler/diexpression.ll (renamed from test/Assembler/mdexpression.ll) | 0
-rw-r--r--  test/Assembler/difile-escaped-chars.ll (renamed from test/Assembler/mdfile-escaped-chars.ll) | 0
-rw-r--r--  test/Assembler/diglobalvariable.ll (renamed from test/Assembler/mdglobalvariable.ll) | 0
-rw-r--r--  test/Assembler/diimportedentity.ll (renamed from test/Assembler/mdimportedentity.ll) | 0
-rw-r--r--  test/Assembler/dilexicalblock.ll (renamed from test/Assembler/mdlexicalblock.ll) | 0
-rw-r--r--  test/Assembler/dilocalvariable-arg-large.ll | 10
-rw-r--r--  test/Assembler/dilocalvariable.ll (renamed from test/Assembler/mdlocalvariable.ll) | 0
-rw-r--r--  test/Assembler/dilocation.ll (renamed from test/Assembler/mdlocation.ll) | 0
-rw-r--r--  test/Assembler/dinamespace.ll (renamed from test/Assembler/mdnamespace.ll) | 0
-rw-r--r--  test/Assembler/diobjcproperty.ll (renamed from test/Assembler/mdobjcproperty.ll) | 0
-rw-r--r--  test/Assembler/disubprogram.ll (renamed from test/Assembler/mdsubprogram.ll) | 0
-rw-r--r--  test/Assembler/disubrange-empty-array.ll (renamed from test/Assembler/mdsubrange-empty-array.ll) | 0
-rw-r--r--  test/Assembler/disubroutinetype.ll (renamed from test/Assembler/mdsubroutinetype.ll) | 0
-rw-r--r--  test/Assembler/ditemplateparameter.ll (renamed from test/Assembler/mdtemplateparameter.ll) | 0
-rw-r--r--  test/Assembler/ditype-large-values.ll (renamed from test/Assembler/mdtype-large-values.ll) | 0
-rw-r--r--  test/Assembler/getelementptr.ll | 4
-rw-r--r--  test/Assembler/invalid-dicompileunit-language-bad.ll (renamed from test/Assembler/invalid-mdcompileunit-language-bad.ll) | 0
-rw-r--r--  test/Assembler/invalid-dicompileunit-language-overflow.ll (renamed from test/Assembler/invalid-mdcompileunit-language-overflow.ll) | 0
-rw-r--r--  test/Assembler/invalid-dicompileunit-missing-language.ll (renamed from test/Assembler/invalid-mdcompileunit-missing-language.ll) | 0
-rw-r--r--  test/Assembler/invalid-dicompileunit-null-file.ll (renamed from test/Assembler/invalid-mdcompileunit-null-file.ll) | 0
-rw-r--r--  test/Assembler/invalid-dicompositetype-missing-tag.ll (renamed from test/Assembler/invalid-mdcompositetype-missing-tag.ll) | 0
-rw-r--r--  test/Assembler/invalid-diderivedtype-missing-basetype.ll (renamed from test/Assembler/invalid-mdderivedtype-missing-basetype.ll) | 0
-rw-r--r--  test/Assembler/invalid-diderivedtype-missing-tag.ll (renamed from test/Assembler/invalid-mdderivedtype-missing-tag.ll) | 0
-rw-r--r--  test/Assembler/invalid-dienumerator-missing-name.ll (renamed from test/Assembler/invalid-mdenumerator-missing-name.ll) | 0
-rw-r--r--  test/Assembler/invalid-dienumerator-missing-value.ll (renamed from test/Assembler/invalid-mdenumerator-missing-value.ll) | 0
-rw-r--r--  test/Assembler/invalid-diexpression-large.ll (renamed from test/Assembler/invalid-mdexpression-large.ll) | 0
-rw-r--r--  test/Assembler/invalid-diexpression-verify.ll (renamed from test/Assembler/invalid-mdexpression-verify.ll) | 0
-rw-r--r--  test/Assembler/invalid-difile-missing-directory.ll (renamed from test/Assembler/invalid-mdfile-missing-directory.ll) | 0
-rw-r--r--  test/Assembler/invalid-difile-missing-filename.ll (renamed from test/Assembler/invalid-mdfile-missing-filename.ll) | 0
-rw-r--r--  test/Assembler/invalid-diglobalvariable-empty-name.ll (renamed from test/Assembler/invalid-mdglobalvariable-empty-name.ll) | 0
-rw-r--r--  test/Assembler/invalid-diglobalvariable-missing-name.ll (renamed from test/Assembler/invalid-mdglobalvariable-missing-name.ll) | 0
-rw-r--r--  test/Assembler/invalid-diimportedentity-missing-scope.ll (renamed from test/Assembler/invalid-mdimportedentity-missing-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-diimportedentity-missing-tag.ll (renamed from test/Assembler/invalid-mdimportedentity-missing-tag.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilexicalblock-missing-scope.ll (renamed from test/Assembler/invalid-mdlexicalblock-missing-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilexicalblock-null-scope.ll (renamed from test/Assembler/invalid-mdlexicalblock-null-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilexicalblockfile-missing-discriminator.ll (renamed from test/Assembler/invalid-mdlexicalblockfile-missing-discriminator.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilexicalblockfile-missing-scope.ll (renamed from test/Assembler/invalid-mdlexicalblockfile-missing-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilexicalblockfile-null-scope.ll (renamed from test/Assembler/invalid-mdlexicalblockfile-null-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocalvariable-arg-large.ll | 6
-rw-r--r--  test/Assembler/invalid-dilocalvariable-arg-negative.ll | 6
-rw-r--r--  test/Assembler/invalid-dilocalvariable-missing-scope.ll (renamed from test/Assembler/invalid-mdlocalvariable-missing-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocalvariable-missing-tag.ll (renamed from test/Assembler/invalid-mdlocalvariable-missing-tag.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocalvariable-null-scope.ll (renamed from test/Assembler/invalid-mdlocalvariable-null-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-field-bad.ll (renamed from test/Assembler/invalid-mdlocation-field-bad.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-field-twice.ll (renamed from test/Assembler/invalid-mdlocation-field-twice.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-missing-scope-2.ll (renamed from test/Assembler/invalid-mdlocation-missing-scope-2.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-missing-scope.ll (renamed from test/Assembler/invalid-mdlocation-missing-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-null-scope.ll (renamed from test/Assembler/invalid-mdlocation-null-scope.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-overflow-column.ll (renamed from test/Assembler/invalid-mdlocation-overflow-column.ll) | 0
-rw-r--r--  test/Assembler/invalid-dilocation-overflow-line.ll (renamed from test/Assembler/invalid-mdlocation-overflow-line.ll) | 0
-rw-r--r--  test/Assembler/invalid-dinamespace-missing-namespace.ll (renamed from test/Assembler/invalid-mdnamespace-missing-namespace.ll) | 0
-rw-r--r--  test/Assembler/invalid-disubrange-count-large.ll (renamed from test/Assembler/invalid-mdsubrange-count-large.ll) | 0
-rw-r--r--  test/Assembler/invalid-disubrange-count-missing.ll (renamed from test/Assembler/invalid-mdsubrange-count-missing.ll) | 0
-rw-r--r--  test/Assembler/invalid-disubrange-count-negative.ll (renamed from test/Assembler/invalid-mdsubrange-count-negative.ll) | 0
-rw-r--r--  test/Assembler/invalid-disubrange-lowerBound-max.ll (renamed from test/Assembler/invalid-mdsubrange-lowerBound-max.ll) | 0
-rw-r--r--  test/Assembler/invalid-disubrange-lowerBound-min.ll (renamed from test/Assembler/invalid-mdsubrange-lowerBound-min.ll) | 0
-rw-r--r--  test/Assembler/invalid-disubroutinetype-missing-types.ll (renamed from test/Assembler/invalid-mdsubroutinetype-missing-types.ll) | 0
-rw-r--r--  test/Assembler/invalid-ditemplatetypeparameter-missing-type.ll (renamed from test/Assembler/invalid-mdtemplatetypeparameter-missing-type.ll) | 0
-rw-r--r--  test/Assembler/invalid-ditemplatevalueparameter-missing-value.ll (renamed from test/Assembler/invalid-mdtemplatevalueparameter-missing-value.ll) | 0
-rw-r--r--  test/Assembler/metadata.ll | 13
-rw-r--r--  test/Bitcode/Inputs/invalid-alias-type-mismatch.bc | bin 0 -> 452 bytes
-rw-r--r--  test/Bitcode/Inputs/invalid-metadata-not-followed-named-node.bc | bin 0 -> 878 bytes
-rw-r--r--  test/Bitcode/Inputs/invalid-vector-length.bc | bin 0 -> 488 bytes
-rw-r--r--  test/Bitcode/invalid.test | 15
-rw-r--r--  test/CodeGen/AArch64/arm64-ccmp.ll | 40
-rw-r--r--  test/CodeGen/AArch64/arm64-named-reg-alloc.ll | 2
-rw-r--r--  test/CodeGen/AArch64/arm64-named-reg-notareg.ll | 2
-rw-r--r--  test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll | 74
-rw-r--r--  test/CodeGen/AArch64/minmax.ll | 11
-rw-r--r--  test/CodeGen/AArch64/special-reg.ll | 48
-rw-r--r--  test/CodeGen/ARM/atomic-ops-v8.ll | 14
-rw-r--r--  test/CodeGen/ARM/build-attributes.ll | 6
-rw-r--r--  test/CodeGen/ARM/ifcvt-callback.ll | 22
-rw-r--r--  test/CodeGen/ARM/jump-table-islands-split.ll | 52
-rw-r--r--  test/CodeGen/ARM/jump-table-islands.ll | 40
-rw-r--r--  test/CodeGen/ARM/jumptable-label.ll | 4
-rw-r--r--  test/CodeGen/ARM/ldrd.ll | 16
-rw-r--r--  test/CodeGen/ARM/named-reg-alloc.ll | 2
-rw-r--r--  test/CodeGen/ARM/named-reg-notareg.ll | 2
-rw-r--r--  test/CodeGen/ARM/special-reg-acore.ll | 78
-rw-r--r--  test/CodeGen/ARM/special-reg-mcore.ll | 143
-rw-r--r--  test/CodeGen/ARM/special-reg.ll | 78
-rw-r--r--  test/CodeGen/BPF/alu8.ll | 3
-rw-r--r--  test/CodeGen/BPF/atomics.ll | 3
-rw-r--r--  test/CodeGen/BPF/basictest.ll | 2
-rw-r--r--  test/CodeGen/BPF/cc_args.ll | 3
-rw-r--r--  test/CodeGen/BPF/cc_args_be.ll | 96
-rw-r--r--  test/CodeGen/BPF/cc_ret.ll | 2
-rw-r--r--  test/CodeGen/BPF/ex1.ll | 2
-rw-r--r--  test/CodeGen/BPF/intrinsics.ll | 2
-rw-r--r--  test/CodeGen/BPF/load.ll | 2
-rw-r--r--  test/CodeGen/BPF/loops.ll | 2
-rw-r--r--  test/CodeGen/BPF/sanity.ll | 2
-rw-r--r--  test/CodeGen/BPF/setcc.ll | 2
-rw-r--r--  test/CodeGen/BPF/shifts.ll | 3
-rw-r--r--  test/CodeGen/BPF/sockex2.ll | 3
-rw-r--r--  test/CodeGen/Generic/stop-after.ll | 3
-rw-r--r--  test/CodeGen/Hexagon/args.ll | 8
-rw-r--r--  test/CodeGen/Hexagon/calling-conv.ll | 73
-rw-r--r--  test/CodeGen/Hexagon/cext-valid-packet1.ll | 1
-rw-r--r--  test/CodeGen/Hexagon/cext-valid-packet2.ll | 1
-rw-r--r--  test/CodeGen/Hexagon/compound.ll | 17
-rw-r--r--  test/CodeGen/Hexagon/dualstore.ll | 15
-rw-r--r--  test/CodeGen/Hexagon/duplex.ll | 7
-rw-r--r--  test/CodeGen/Hexagon/relax.ll | 14
-rw-r--r--  test/CodeGen/Hexagon/sube.ll | 4
-rw-r--r--  test/CodeGen/MIR/lit.local.cfg | 2
-rw-r--r--  test/CodeGen/MIR/llvm-ir-error-reported.mir | 22
-rw-r--r--  test/CodeGen/MIR/llvmIR.mir | 32
-rw-r--r--  test/CodeGen/MIR/llvmIRMissing.mir | 5
-rw-r--r--  test/CodeGen/MIR/machine-function-missing-name.mir | 22
-rw-r--r--  test/CodeGen/MIR/machine-function.mir | 24
-rw-r--r--  test/CodeGen/Mips/Fast-ISel/bswap1.ll | 58
-rw-r--r--  test/CodeGen/Mips/Fast-ISel/div1.ll | 55
-rw-r--r--  test/CodeGen/Mips/Fast-ISel/memtest1.ll | 74
-rw-r--r--  test/CodeGen/Mips/Fast-ISel/mul1.ll | 18
-rw-r--r--  test/CodeGen/Mips/Fast-ISel/rem1.ll | 56
-rw-r--r--  test/CodeGen/Mips/Fast-ISel/sel1.ll | 91
-rw-r--r--  test/CodeGen/Mips/dynamic-stack-realignment.ll | 299
-rw-r--r--  test/CodeGen/Mips/ehframe-indirect.ll | 26
-rw-r--r--  test/CodeGen/Mips/emergency-spill-slot-near-fp.ll | 4
-rw-r--r--  test/CodeGen/NVPTX/access-non-generic.ll | 16
-rw-r--r--  test/CodeGen/NVPTX/bug21465.ll | 12
-rw-r--r--  test/CodeGen/NVPTX/call-with-alloca-buffer.ll | 7
-rw-r--r--  test/CodeGen/NVPTX/globals_init.ll | 23
-rw-r--r--  test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll | 20
-rw-r--r--  test/CodeGen/NVPTX/pr13291-i1-store.ll | 8
-rw-r--r--  test/CodeGen/NVPTX/surf-read-cuda.ll | 8
-rw-r--r--  test/CodeGen/NVPTX/tex-read-cuda.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/fma.ll | 83
-rw-r--r--  test/CodeGen/PowerPC/vsx-fma-sp.ll | 167
-rw-r--r--  test/CodeGen/R600/cgp-addressing-modes.ll | 242
-rw-r--r--  test/CodeGen/R600/coalescer_remat.ll | 6
-rw-r--r--  test/CodeGen/R600/codegen-prepare-addrmode-sext.ll | 10
-rw-r--r--  test/CodeGen/R600/fmul.ll | 16
-rw-r--r--  test/CodeGen/R600/half.ll | 543
-rw-r--r--  test/CodeGen/R600/imm.ll | 2
-rw-r--r--  test/CodeGen/R600/loop-address.ll | 5
-rw-r--r--  test/CodeGen/R600/loop-idiom.ll | 4
-rw-r--r--  test/CodeGen/R600/max.ll | 51
-rw-r--r--  test/CodeGen/R600/min.ll | 51
-rw-r--r--  test/CodeGen/R600/sext-in-reg.ll | 5
-rw-r--r--  test/CodeGen/R600/si-vector-hang.ll | 3
-rw-r--r--  test/CodeGen/R600/subreg-eliminate-dead.ll | 19
-rw-r--r--  test/CodeGen/R600/trunc-store-f64-to-f16.ll | 56
-rw-r--r--  test/CodeGen/R600/unroll.ll | 5
-rw-r--r--  test/CodeGen/R600/wrong-transalu-pos-fix.ll | 7
-rw-r--r--  test/CodeGen/Thumb2/constant-islands-jump-table.ll | 2
-rw-r--r--  test/CodeGen/Thumb2/float-ops.ll | 6
-rw-r--r--  test/CodeGen/Thumb2/thumb2-tbh.ll | 12
-rw-r--r--  test/CodeGen/X86/asm-reject-reg-type-mismatch.ll | 10
-rw-r--r--  test/CodeGen/X86/avx-vperm2x128.ll | 2
-rw-r--r--  test/CodeGen/X86/avx2-vector-shifts.ll | 390
-rw-r--r--  test/CodeGen/X86/avx512-build-vector.ll | 10
-rw-r--r--  test/CodeGen/X86/avx512-intrinsics.ll | 45
-rw-r--r--  test/CodeGen/X86/avx512-shuffle.ll | 336
-rw-r--r--  test/CodeGen/X86/avx512-vec-cmp.ll | 7
-rw-r--r--  test/CodeGen/X86/avx512vl-intrinsics.ll | 40
-rw-r--r--  test/CodeGen/X86/buildvec-insertvec.ll | 55
-rw-r--r--  test/CodeGen/X86/critical-anti-dep-breaker.ll | 3
-rw-r--r--  test/CodeGen/X86/machine-cp.ll | 55
-rw-r--r--  test/CodeGen/X86/pic.ll | 8
-rw-r--r--  test/CodeGen/X86/pr23603.ll | 24
-rw-r--r--  test/CodeGen/X86/pr23664.ll | 14
-rw-r--r--  test/CodeGen/X86/recip-fastmath.ll | 4
-rw-r--r--  test/CodeGen/X86/sibcall-win64.ll | 24
-rw-r--r--  test/CodeGen/X86/sqrt-fastmath.ll | 2
-rw-r--r--  test/CodeGen/X86/stack-folding-x86_64.ll | 51
-rw-r--r--  test/CodeGen/X86/statepoint-far-call.ll | 22
-rw-r--r--  test/CodeGen/X86/switch-or.ll | 21
-rw-r--r--  test/CodeGen/X86/switch.ll | 15
-rw-r--r--  test/CodeGen/X86/tail-call-got.ll | 18
-rw-r--r--  test/CodeGen/X86/tailcallpic1.ll | 3
-rw-r--r--  test/CodeGen/X86/tailcallpic3.ll | 73
-rw-r--r--  test/CodeGen/X86/vec_fp_to_int.ll | 2
-rw-r--r--  test/CodeGen/X86/vec_shift8.ll | 1016
-rw-r--r--  test/CodeGen/X86/vector-ctpop.ll | 159
-rw-r--r--  test/CodeGen/X86/vector-lzcnt-128.ll | 1915
-rw-r--r--  test/CodeGen/X86/vector-lzcnt-256.ll | 1305
-rw-r--r--  test/CodeGen/X86/vector-popcnt-128.ll | 462
-rw-r--r--  test/CodeGen/X86/vector-popcnt-256.ll | 220
-rw-r--r--  test/CodeGen/X86/vector-shuffle-256-v4.ll | 1
-rw-r--r--  test/CodeGen/X86/vector-shuffle-512-v8.ll | 713
-rw-r--r--  test/CodeGen/X86/vector-tzcnt-128.ll | 1788
-rw-r--r--  test/CodeGen/X86/vector-tzcnt-256.ll | 1195
-rw-r--r--  test/CodeGen/X86/vector-zext.ll | 10
-rw-r--r--  test/CodeGen/X86/win32-eh-states.ll | 112
-rw-r--r--  test/CodeGen/X86/win32-eh.ll | 59
-rwxr-xr-x  test/DebugInfo/Inputs/dwarfdump.elf-mips64-64-bit-dwarf | bin 0 -> 15638 bytes
-rw-r--r--  test/DebugInfo/Inputs/invalid.elf | bin 0 -> 64 bytes
-rw-r--r--  test/DebugInfo/Inputs/invalid.elf.2 | 1
-rw-r--r--  test/DebugInfo/Inputs/invalid.elf.3 | 1
-rw-r--r--  test/DebugInfo/Inputs/test-multiple-macho.o | bin 0 -> 2452 bytes
-rw-r--r--  test/DebugInfo/Inputs/test-simple-macho.o | bin 0 -> 1944 bytes
-rw-r--r--  test/DebugInfo/X86/expressions.ll | 110
-rw-r--r--  test/DebugInfo/X86/inlined-indirect-value.ll | 81
-rw-r--r--  test/DebugInfo/debuglineinfo-macho.test | 43
-rw-r--r--  test/DebugInfo/debuglineinfo.test | 3
-rw-r--r--  test/DebugInfo/dwarfdump-64-bit-dwarf.test | 15
-rw-r--r--  test/DebugInfo/dwarfdump-invalid.test | 6
-rw-r--r--  test/ExecutionEngine/MCJIT/cross-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/eh-lg-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/eh-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/multi-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/stubs-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/test-global-init-nonzero-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/MCJIT/test-ptr-reloc-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/cross-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/eh-lg-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/eh-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/multi-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/remote/cross-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/remote/multi-module-sm-pic-a.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/stubs-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/test-global-init-nonzero-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/OrcMCJIT/test-ptr-reloc-sm-pic.ll | 2
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/Mips/ELF_Mips64r2N64_PIC_relocations.s | 164
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/Mips/ELF_O32_PIC_relocations.s | 50
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/Mips/Inputs/ExternalFunction.ll | 4
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/Mips/lit.local.cfg | 3
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/X86/MachO_i386_DynNoPIC_relocations.s | 4
-rw-r--r--  test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata-darwin.ll | 12
-rw-r--r--  test/Instrumentation/AddressSanitizer/instrument-dynamic-allocas.ll | 8
-rw-r--r--  test/Instrumentation/AddressSanitizer/undecidable-dynamic-alloca-1.ll | 23
-rw-r--r--  test/Instrumentation/InstrProfiling/PR23499.ll | 21
-rw-r--r--  test/MC/AArch64/armv8.1a-atomic.s | 184
-rw-r--r--  test/MC/AArch64/basic-a64-diagnostics.s | 4
-rw-r--r--  test/MC/AArch64/basic-a64-instructions.s | 2
-rw-r--r--  test/MC/AArch64/case-insen-reg-names.s | 8
-rw-r--r--  test/MC/ARM/arm-elf-symver.s | 36
-rw-r--r--  test/MC/ARM/directive-arch-armv2.s | 2
-rw-r--r--  test/MC/ARM/directive-arch-armv2a.s | 2
-rw-r--r--  test/MC/ARM/directive-arch-armv3.s | 2
-rw-r--r--  test/MC/ARM/directive-arch-armv3m.s | 2
-rw-r--r--  test/MC/ARM/directive-fpu-multiple.s | 18
-rw-r--r--  test/MC/ARM/elf-movt.s | 8
-rw-r--r--  test/MC/AsmParser/defsym.s | 20
-rw-r--r--  test/MC/AsmParser/defsym_error1.s | 2
-rw-r--r--  test/MC/AsmParser/defsym_error2.s | 2
-rw-r--r--  test/MC/COFF/cross-section-relative.ll | 60
-rw-r--r--  test/MC/COFF/cross-section-relative.s | 118
-rw-r--r--  test/MC/Disassembler/AArch64/armv8.1a-atomic.txt | 87
-rw-r--r--  test/MC/Disassembler/AArch64/basic-a64-instructions.txt | 2
-rw-r--r--  test/MC/Disassembler/PowerPC/vsx.txt | 24
-rw-r--r--  test/MC/ELF/alias.s | 18
-rw-r--r--  test/MC/ELF/basic-elf-32.s | 14
-rw-r--r--  test/MC/ELF/basic-elf-64.s | 10
-rw-r--r--  test/MC/ELF/comdat-dup-group-name.s | 9
-rw-r--r--  test/MC/ELF/comdat.s | 2
-rw-r--r--  test/MC/ELF/common-error3.s | 5
-rw-r--r--  test/MC/ELF/common-redeclare.s | 5
-rw-r--r--  test/MC/ELF/empty.s | 4
-rw-r--r--  test/MC/ELF/got.s | 11
-rw-r--r--  test/MC/ELF/many-sections-2.s | 15
-rw-r--r--  test/MC/ELF/many-sections.s | 15
-rw-r--r--  test/MC/ELF/noexec.s | 12
-rw-r--r--  test/MC/ELF/popsection.s | 21
-rw-r--r--  test/MC/ELF/reloc-same-name-section.s | 4
-rw-r--r--  test/MC/ELF/relocation-386.s | 23
-rw-r--r--  test/MC/ELF/relocation.s | 17
-rw-r--r--  test/MC/ELF/section-sym.s | 78
-rw-r--r--  test/MC/ELF/section-sym2.s | 10
-rw-r--r--  test/MC/ELF/strtab-suffix-opt.s | 2
-rw-r--r--  test/MC/ELF/symver.s | 36
-rw-r--r--  test/MC/ELF/undef.s | 36
-rw-r--r--  test/MC/ELF/weakref-reloc.s | 32
-rw-r--r--  test/MC/ELF/weakref.s | 20
-rw-r--r--  test/MC/Hexagon/inst_add.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_eq.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_eqi.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_gt.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_gti.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_lt.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_ugt.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_ugti.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_cmp_ult.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_select.ll | 9
-rw-r--r--  test/MC/Hexagon/inst_sxtb.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_sxth.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_zxtb.ll | 6
-rw-r--r--  test/MC/Hexagon/inst_zxth.ll | 6
-rw-r--r--  test/MC/MachO/absolutize.s | 46
-rw-r--r--  test/MC/MachO/reloc-diff.s | 22
-rw-r--r--  test/MC/Mips/mips-expansions.s | 65
-rw-r--r--  test/MC/Mips/mips-pdr.s | 18
-rw-r--r--  test/MC/Mips/mips-relocations.s | 26
-rw-r--r--  test/MC/Mips/octeon-instructions.s | 4
-rw-r--r--  test/MC/Mips/relocation.s | 10
-rw-r--r--  test/MC/Mips/set-push-pop-directives-bad.s | 9
-rw-r--r--  test/MC/Mips/set-push-pop-directives.s | 17
-rw-r--r--  test/MC/Mips/set-softfloat-hardfloat-bad.s | 14
-rw-r--r--  test/MC/Mips/set-softfloat-hardfloat.s | 12
-rw-r--r--  test/MC/PowerPC/st-other-crash.s | 21
-rw-r--r--  test/MC/PowerPC/vsx.s | 24
-rw-r--r--  test/MC/X86/avx512-encodings.s | 1089
-rw-r--r--  test/MC/X86/hex-immediates.s | 10
-rw-r--r--  test/MC/X86/intel-syntax-avx512.s | 83
-rw-r--r--  test/MC/X86/intel-syntax.s | 3
-rw-r--r--  test/MC/X86/mpx-encodings.s | 38
-rw-r--r--  test/MC/X86/x86-64-avx512bw.s | 35
-rw-r--r--  test/MC/X86/x86-64-avx512bw_vl.s | 161
-rw-r--r--  test/MC/X86/x86-64-avx512dq.s | 232
-rw-r--r--  test/MC/X86/x86-64-avx512dq_vl.s | 239
-rw-r--r--  test/MC/X86/x86-64-avx512f_vl.s | 935
-rw-r--r--  test/Object/Inputs/macho-invalid-header | bin 0 -> 24 bytes
-rw-r--r--  test/Object/Inputs/macho64-invalid-incomplete-segment-load-command | bin 0 -> 64 bytes
-rw-r--r--  test/Object/Inputs/no-start-symbol.elf-x86_64 | bin 0 -> 544 bytes
-rw-r--r--  test/Object/X86/no-start-symbol.test | 9
-rw-r--r--  test/Object/macho-invalid.test | 18
-rw-r--r--  test/Object/obj2yaml.test | 47
-rw-r--r--  test/Object/readobj-shared-object.test | 20
-rw-r--r--  test/Transforms/CorrelatedValuePropagation/select.ll | 22
-rw-r--r--  test/Transforms/GVN/unreachable_block_infinite_loop.ll | 29
-rw-r--r--  test/Transforms/IndVarSimplify/exit_value_test2.ll | 52
-rw-r--r--  test/Transforms/IndVarSimplify/exit_value_test3.ll | 24
-rw-r--r--  test/Transforms/IndVarSimplify/lcssa-preservation.ll | 3
-rw-r--r--  test/Transforms/InstCombine/fpcast.ll | 8
-rw-r--r--  test/Transforms/InstCombine/load-bitcast32.ll | 79
-rw-r--r--  test/Transforms/InstCombine/load-bitcast64.ll | 78
-rw-r--r--  test/Transforms/InstCombine/pr23751.ll | 13
-rw-r--r--  test/Transforms/InstCombine/select.ll | 13
-rw-r--r--  test/Transforms/LoopUnroll/full-unroll-bad-cost.ll | 58
-rw-r--r--  test/Transforms/LoopUnroll/full-unroll-bad-geps.ll | 2
-rw-r--r--  test/Transforms/LoopUnroll/full-unroll-heuristics.ll | 12
-rw-r--r--  test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll | 37
-rw-r--r--  test/Transforms/LoopVectorize/interleaved-accesses.ll | 467
-rw-r--r--  test/Transforms/LoopVectorize/zero-sized-pointee-crash.ll | 27
-rw-r--r--  test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll | 8
-rw-r--r--  test/Transforms/MergeFunc/linkonce_odr.ll | 30
-rw-r--r--  test/Transforms/NaryReassociate/NVPTX/nary-gep.ll | 31
-rw-r--r--  test/Transforms/Reassociate/basictest.ll | 4
-rw-r--r--  test/Transforms/Reassociate/canonicalize-neg-const.ll | 22
-rw-r--r--  test/Transforms/RewriteStatepointsForGC/deref-pointers.ll | 77
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/R600/lit.local.cfg | 3
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/R600/split-gep-and-gvn-addrspace-addressing-modes.ll | 94
-rw-r--r--  test/Transforms/Sink/convergent.ll | 24
-rw-r--r--  test/tools/dsymutil/Inputs/frame-dw2.ll | 71
-rw-r--r--  test/tools/dsymutil/Inputs/frame-dw4.ll | 71
-rw-r--r--  test/tools/dsymutil/Inputs/frame.c | 10
-rw-r--r--  test/tools/dsymutil/X86/basic-linking-x86.test | 4
-rw-r--r--  test/tools/dsymutil/X86/basic-lto-linking-x86.test | 1
-rw-r--r--  test/tools/dsymutil/X86/frame-1.test | 32
-rw-r--r--  test/tools/dsymutil/X86/frame-2.test | 47
-rw-r--r--  test/tools/dsymutil/debug-map-parsing.test | 92
-rw-r--r--  test/tools/dsymutil/yaml-object-address-rewrite.test | 44
-rw-r--r--  test/tools/llvm-objdump/invalid-input.test | 6
-rw-r--r--  test/tools/llvm-readobj/elf-dtflags.test | 4
-rw-r--r--  test/tools/llvm-readobj/sections-ext.test | 2
359 files changed, 18790 insertions, 1693 deletions
diff --git a/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
index 1b47341ed4d6e..6a5e42fbd5558 100644
--- a/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
+++ b/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
@@ -38,3 +38,73 @@ for.body:
for.end:
ret void
}
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; unsigned i, j;
+; for (i = 1; i < SIZE; i++) {
+; for (j = i; j < SIZE; j++) {
+; a[i][j] = a[i+1][j-1] + 2;
+; }
+; }
+; Extends the previous example to coupled MIV subscripts.
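+; Because j starts at i, the column subscript depends on both loop induction
+; variables (an MIV subscript) and shares the outer variable with the row
+; subscript, which makes the pair coupled; the i32 %j.0 mixed with the i64
+; induction variables is the type mismatch the test name refers to.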
+
+
+@a = global [10004 x [10004 x i32]] zeroinitializer, align 16
+
+; Function Attrs: nounwind uwtable
+define void @coupled_miv_type_mismatch(i32 %n) #0 {
+; CHECK-LABEL: 'Dependence Analysis' for function 'coupled_miv_type_mismatch'
+; DELIN-LABEL: 'Dependence Analysis' for function 'coupled_miv_type_mismatch'
+entry:
+ br label %for.cond
+
+; CHECK: da analyze - input [0 *]!
+; CHECK: da analyze - anti [1 *]!
+; CHECK: da analyze - none!
+; DELIN: da analyze - input [0 *]!
+; DELIN: da analyze - anti [1 *]!
+; DELIN: da analyze - none!
+for.cond: ; preds = %for.inc11, %entry
+ %indvars.iv11 = phi i64 [ %indvars.iv.next12, %for.inc11 ], [ 1, %entry ]
+ %exitcond14 = icmp ne i64 %indvars.iv11, 10000
+ br i1 %exitcond14, label %for.cond1.preheader, label %for.end13
+
+for.cond1.preheader: ; preds = %for.cond
+ %0 = trunc i64 %indvars.iv11 to i32
+ br label %for.cond1
+
+for.cond1: ; preds = %for.cond1.preheader, %for.body3
+ %indvars.iv8 = phi i64 [ %indvars.iv11, %for.cond1.preheader ], [ %indvars.iv.next9, %for.body3 ]
+ %j.0 = phi i32 [ %inc, %for.body3 ], [ %0, %for.cond1.preheader ]
+ %lftr.wideiv = trunc i64 %indvars.iv8 to i32
+ %exitcond = icmp ne i32 %lftr.wideiv, 10000
+ br i1 %exitcond, label %for.body3, label %for.inc11
+
+for.body3: ; preds = %for.cond1
+ %sub = add nsw i32 %j.0, -1
+ %idxprom = zext i32 %sub to i64
+ %1 = add nuw nsw i64 %indvars.iv11, 1
+ %arrayidx5 = getelementptr inbounds [10004 x [10004 x i32]], [10004 x [10004 x i32]]* @a, i64 0, i64 %1, i64 %idxprom
+ %2 = load i32, i32* %arrayidx5, align 4
+ %add6 = add nsw i32 %2, 2
+ %arrayidx10 = getelementptr inbounds [10004 x [10004 x i32]], [10004 x [10004 x i32]]* @a, i64 0, i64 %indvars.iv11, i64 %indvars.iv8
+ store i32 %add6, i32* %arrayidx10, align 4
+ %indvars.iv.next9 = add nuw nsw i64 %indvars.iv8, 1
+ %inc = add nuw nsw i32 %j.0, 1
+ br label %for.cond1
+
+for.inc11: ; preds = %for.cond1
+ %indvars.iv.next12 = add nuw nsw i64 %indvars.iv11, 1
+ br label %for.cond
+
+for.end13: ; preds = %for.cond
+ ret void
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (https://vaivaswatha@bitbucket.org/compilertree/amd_clang.git 93a05fb75ee3411d24e8b2b184fc766a5318403e) (https://vaivaswatha@bitbucket.org/compilertree/amd_llvm.git 166d93d26efc912b517739f64d054a435e8e95cd)"}
diff --git a/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll b/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
new file mode 100644
index 0000000000000..f9871c643c9d5
--- /dev/null
+++ b/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
@@ -0,0 +1,58 @@
+; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
+
+; 3 reads and 3 writes should need 12 memchecks
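+; A runtime check is needed for every pointer pair in which at least one
+; pointer is written: 3 * 3 read/write pairs plus 3 write/write pairs = 12.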
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnueabi"
+
+; CHECK: Memory dependences are safe with run-time checks
+; Memory dependencies have labels starting from 0, so in
+; order to verify that we have n checks, we look for
+; (n-1): and not n:.
+
+; CHECK: Run-time memory checks:
+; CHECK-NEXT: 0:
+; CHECK: 11:
+; CHECK-NOT: 12:
+
+define void @testf(i16* %a,
+ i16* %b,
+ i16* %c,
+ i16* %d,
+ i16* %e,
+ i16* %f) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+ %add = add nuw nsw i64 %ind, 1
+
+ %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
+ %loadA = load i16, i16* %arrayidxA, align 2
+
+ %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
+ %loadB = load i16, i16* %arrayidxB, align 2
+
+ %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %ind
+ %loadC = load i16, i16* %arrayidxC, align 2
+
+ %mul = mul i16 %loadB, %loadA
+ %mul1 = mul i16 %mul, %loadC
+
+ %arrayidxD = getelementptr inbounds i16, i16* %d, i64 %ind
+ store i16 %mul1, i16* %arrayidxD, align 2
+
+ %arrayidxE = getelementptr inbounds i16, i16* %e, i64 %ind
+ store i16 %mul, i16* %arrayidxE, align 2
+
+ %arrayidxF = getelementptr inbounds i16, i16* %f, i64 %ind
+ store i16 %mul1, i16* %arrayidxF, align 2
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll b/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
new file mode 100644
index 0000000000000..7357356629529
--- /dev/null
+++ b/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
@@ -0,0 +1,540 @@
+; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; The following cases have no dependences.
+
+; void nodep_Read_Write(int *A) {
+; int *B = A + 1;
+; for (unsigned i = 0; i < 1024; i+=3)
+; B[i] = A[i] + 1;
+; }
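+; With B = A + 1 and a stride of 3, the loads touch A[0], A[3], A[6], ...
+; while the stores touch A[1], A[4], A[7], ..., so the two access sets can
+; never overlap and there is no dependence.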
+
+; CHECK: function 'nodep_Read_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Run-time memory checks:
+
+define void @nodep_Read_Write(i32* nocapture %A) {
+entry:
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 1
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; int nodep_Write_Read(int *A) {
+; int sum = 0;
+; for (unsigned i = 0; i < 1024; i+=4) {
+; A[i] = i;
+; sum += A[i+3];
+; }
+;
+; return sum;
+; }
+
+; CHECK: function 'nodep_Write_Read':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Run-time memory checks:
+
+define i32 @nodep_Write_Read(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret i32 %add3
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %sum.013 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, i32* %arrayidx, align 4
+ %1 = or i64 %indvars.iv, 3
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %1
+ %2 = load i32, i32* %arrayidx2, align 4
+ %add3 = add nsw i32 %2, %sum.013
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; void nodep_Write_Write(int *A) {
+; for (unsigned i = 0; i < 1024; i+=2) {
+; A[i] = i;
+; A[i+1] = i+1;
+; }
+; }
+
+; CHECK: function 'nodep_Write_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Run-time memory checks:
+
+define void @nodep_Write_Write(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, i32* %arrayidx, align 4
+ %1 = or i64 %indvars.iv, 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %1
+ %2 = trunc i64 %1 to i32
+ store i32 %2, i32* %arrayidx3, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; The following cases have unsafe dependences and are not vectorizable.
+
+; void unsafe_Read_Write(int *A) {
+; for (unsigned i = 0; i < 1024; i+=3)
+; A[i+3] = A[i] + 1;
+; }
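+; The A[i+3] stored in one iteration is exactly the A[i] loaded by the next
+; iteration, so each iteration depends on its predecessor and the loop cannot
+; be vectorized.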
+
+; CHECK: function 'unsafe_Read_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %0 = load i32, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, i32* %arrayidx3, align 4
+
+define void @unsafe_Read_Write(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.010 = phi i32 [ 0, %entry ], [ %add1, %for.body ]
+ %idxprom = zext i32 %i.010 to i64
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, 1
+ %add1 = add i32 %i.010, 3
+ %idxprom2 = zext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %idxprom2
+ store i32 %add, i32* %arrayidx3, align 4
+ %cmp = icmp ult i32 %add1, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; int unsafe_Write_Read(int *A) {
+; int sum = 0;
+; for (unsigned i = 0; i < 1024; i+=4) {
+; A[i] = i;
+; sum += A[i+4];
+; }
+;
+; return sum;
+; }
+
+; CHECK: function 'unsafe_Write_Read':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, i32* %arrayidx2, align 4
+
+define i32 @unsafe_Write_Read(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret i32 %add3
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %sum.013 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
+ %1 = load i32, i32* %arrayidx2, align 4
+ %add3 = add nsw i32 %1, %sum.013
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; void unsafe_Write_Write(int *A) {
+; for (unsigned i = 0; i < 1024; i+=2) {
+; A[i] = i;
+; A[i+2] = i+1;
+; }
+; }
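+; The store to A[i+2] in one iteration targets the same element as the store
+; to A[i] two iterations later, an output dependence that forbids reordering
+; the stores.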
+
+; CHECK: function 'unsafe_Write_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %2, i32* %arrayidx3, align 4
+
+define void @unsafe_Write_Write(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, i32* %arrayidx, align 4
+ %1 = or i64 %indvars.iv, 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
+ %2 = trunc i64 %1 to i32
+ store i32 %2, i32* %arrayidx3, align 4
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; The following cases check that strided accesses can be vectorized.
+
+; void vectorizable_Read_Write(int *A) {
+; int *B = A + 4;
+; for (unsigned i = 0; i < 1024; i+=2)
+; B[i] = A[i] + 1;
+; }
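+; The store to B[i] is read back as A[i] four elements (two iterations)
+; later; the dependence distance is still large enough to permit a small
+; vectorization factor.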
+
+; CHECK: function 'vectorizable_Read_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: %0 = load i32, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+
+define void @vectorizable_Read_Write(i32* nocapture %A) {
+entry:
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; int vectorizable_Write_Read(int *A) {
+; int *B = A + 4;
+; int sum = 0;
+; for (unsigned i = 0; i < 1024; i+=2) {
+; A[i] = i;
+; sum += B[i];
+; }
+;
+; return sum;
+; }
+
+; CHECK: function 'vectorizable_Write_Read':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, i32* %arrayidx2, align 4
+
+define i32 @vectorizable_Write_Read(i32* nocapture %A) {
+entry:
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret i32 %add
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %sum.013 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %1, %sum.013
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; void vectorizable_Write_Write(int *A) {
+; int *B = A + 4;
+; for (unsigned i = 0; i < 1024; i+=2) {
+; A[i] = i;
+; B[i] = i+1;
+; }
+; }
+
+; CHECK: function 'vectorizable_Write_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %2, i32* %arrayidx2, align 4
+
+define void @vectorizable_Write_Write(i32* nocapture %A) {
+entry:
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, i32* %arrayidx, align 4
+ %1 = or i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
+ %2 = trunc i64 %1 to i32
+ store i32 %2, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; void vectorizable_unscaled_Read_Write(int *A) {
+; int *B = (int *)((char *)A + 14);
+; for (unsigned i = 0; i < 1024; i+=2)
+; B[i] = A[i] + 1;
+; }
+
+; FIXME: This case looks like the previous case @vectorizable_Read_Write. It should
+; be vectorizable.
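+; The 14-byte offset is not a multiple of the 4-byte element size, so stores
+; and loads partially overlap; that presumably defeats store-to-load
+; forwarding and is why the dependence is reported as preventing forwarding.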
+
+; CHECK: function 'vectorizable_unscaled_Read_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
+; CHECK-NEXT: %2 = load i32, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+
+define void @vectorizable_unscaled_Read_Write(i32* nocapture %A) {
+entry:
+ %0 = bitcast i32* %A to i8*
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 14
+ %1 = bitcast i8* %add.ptr to i32*
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; int vectorizable_unscaled_Write_Read(int *A) {
+; int *B = (int *)((char *)A + 17);
+; int sum = 0;
+; for (unsigned i = 0; i < 1024; i+=2) {
+; A[i] = i;
+; sum += B[i];
+; }
+;
+; return sum;
+; }
+
+; CHECK: for function 'vectorizable_unscaled_Write_Read':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %2, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: %3 = load i32, i32* %arrayidx2, align 4
+
+define i32 @vectorizable_unscaled_Write_Read(i32* nocapture %A) {
+entry:
+ %0 = bitcast i32* %A to i8*
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 17
+ %1 = bitcast i8* %add.ptr to i32*
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret i32 %add
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %sum.013 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = trunc i64 %indvars.iv to i32
+ store i32 %2, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
+ %3 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %3, %sum.013
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; void unsafe_unscaled_Read_Write(int *A) {
+; int *B = (int *)((char *)A + 11);
+; for (unsigned i = 0; i < 1024; i+=2)
+; B[i] = A[i] + 1;
+; }
+
+; CHECK: function 'unsafe_unscaled_Read_Write':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %2 = load i32, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+
+define void @unsafe_unscaled_Read_Write(i32* nocapture %A) {
+entry:
+ %0 = bitcast i32* %A to i8*
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 11
+ %1 = bitcast i8* %add.ptr to i32*
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; CHECK: function 'unsafe_unscaled_Read_Write2':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %2 = load i32, i32* %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+
+; void unsafe_unscaled_Read_Write2(int *A) {
+; int *B = (int *)((char *)A + 1);
+; for (unsigned i = 0; i < 1024; i+=2)
+; B[i] = A[i] + 1;
+; }
+
+define void @unsafe_unscaled_Read_Write2(i32* nocapture %A) {
+entry:
+ %0 = bitcast i32* %A to i8*
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 1
+ %1 = bitcast i8* %add.ptr to i32*
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %2 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; The following case checks that interleaved stores have dependences with
+; another store and cannot pass the dependence check.
+
+; void interleaved_stores(int *A) {
+; int *B = (int *) ((char *)A + 1);
+; for(int i = 0; i < 1024; i+=2) {
+; B[i] = i; // (1)
+; A[i+1] = i + 1; // (2)
+; B[i+1] = i + 1; // (3)
+; }
+; }
+;
+; The access (2) has overlaps with (1) and (3).
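+; In bytes per iteration: (1) stores [4i+1, 4i+5), (2) stores [4i+4, 4i+8),
+; and (3) stores [4i+5, 4i+9), so (2) overlaps both neighbours without
+; matching either exactly.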
+
+; CHECK: function 'interleaved_stores':
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop
+; CHECK-NEXT: Interesting Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %4, i32* %arrayidx5, align 4 ->
+; CHECK-NEXT: store i32 %4, i32* %arrayidx9, align 4
+; CHECK: Backward:
+; CHECK-NEXT: store i32 %2, i32* %arrayidx2, align 4 ->
+; CHECK-NEXT: store i32 %4, i32* %arrayidx5, align 4
+
+define void @interleaved_stores(i32* nocapture %A) {
+entry:
+ %0 = bitcast i32* %A to i8*
+ %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
+ %1 = bitcast i8* %incdec.ptr to i32*
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %2 = trunc i64 %indvars.iv to i32
+ %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
+ store i32 %2, i32* %arrayidx2, align 4
+ %3 = or i64 %indvars.iv, 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %3
+ %4 = trunc i64 %3 to i32
+ store i32 %4, i32* %arrayidx5, align 4
+ %arrayidx9 = getelementptr inbounds i32, i32* %1, i64 %3
+ store i32 %4, i32* %arrayidx9, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp slt i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
diff --git a/test/Analysis/ValueTracking/memory-dereferenceable.ll b/test/Analysis/ValueTracking/memory-dereferenceable.ll
index dae64d7acc228..f49f4f77f404f 100644
--- a/test/Analysis/ValueTracking/memory-dereferenceable.ll
+++ b/test/Analysis/ValueTracking/memory-dereferenceable.ll
@@ -10,6 +10,9 @@ declare zeroext i1 @return_i1()
@globalstr = global [6 x i8] c"hello\00"
@globali32ptr = external global i32*
+%struct.A = type { [8 x i8], [5 x i8] }
+@globalstruct = external global %struct.A
+
define void @test(i32 addrspace(1)* dereferenceable(8) %dparam) gc "statepoint-example" {
; CHECK: The following are dereferenceable:
; CHECK: %globalptr
@@ -22,6 +25,8 @@ define void @test(i32 addrspace(1)* dereferenceable(8) %dparam) gc "statepoint-e
; CHECK-NOT: %d2_load
; CHECK-NOT: %d_or_null_load
; CHECK: %d_or_null_non_null_load
+; CHECK: %within_allocation
+; CHECK-NOT: %outside_allocation
entry:
%globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
%load1 = load i8, i8* %globalptr
@@ -54,6 +59,14 @@ entry:
%d_or_null_non_null_load = load i32*, i32** @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
%load10 = load i32, i32* %d_or_null_non_null_load
+ ; It's OK to overrun static array size as long as we stay within underlying object size
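+ ; (%struct.A is 13 bytes, so byte 10 of the first member is still in bounds)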
+ %within_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 0, i64 10
+ %load11 = load i8, i8* %within_allocation
+
+ ; GEP is outside the underlying object size
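+ ; (the second member starts at byte 8, so index 10 lands at byte 18, past
+ ; the end of the 13-byte object)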
+ %outside_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 1, i64 10
+ %load12 = load i8, i8* %outside_allocation
+
ret void
}
diff --git a/test/Assembler/mdcompileunit.ll b/test/Assembler/dicompileunit.ll
index dc136f0b83e9a..dc136f0b83e9a 100644
--- a/test/Assembler/mdcompileunit.ll
+++ b/test/Assembler/dicompileunit.ll
diff --git a/test/Assembler/mdexpression.ll b/test/Assembler/diexpression.ll
index 31be86cef2d4c..31be86cef2d4c 100644
--- a/test/Assembler/mdexpression.ll
+++ b/test/Assembler/diexpression.ll
diff --git a/test/Assembler/mdfile-escaped-chars.ll b/test/Assembler/difile-escaped-chars.ll
index 5900fdca061dc..5900fdca061dc 100644
--- a/test/Assembler/mdfile-escaped-chars.ll
+++ b/test/Assembler/difile-escaped-chars.ll
diff --git a/test/Assembler/mdglobalvariable.ll b/test/Assembler/diglobalvariable.ll
index 0d027d310f6d8..0d027d310f6d8 100644
--- a/test/Assembler/mdglobalvariable.ll
+++ b/test/Assembler/diglobalvariable.ll
diff --git a/test/Assembler/mdimportedentity.ll b/test/Assembler/diimportedentity.ll
index 929267e3b25e5..929267e3b25e5 100644
--- a/test/Assembler/mdimportedentity.ll
+++ b/test/Assembler/diimportedentity.ll
diff --git a/test/Assembler/mdlexicalblock.ll b/test/Assembler/dilexicalblock.ll
index 2cab372384caa..2cab372384caa 100644
--- a/test/Assembler/mdlexicalblock.ll
+++ b/test/Assembler/dilexicalblock.ll
diff --git a/test/Assembler/dilocalvariable-arg-large.ll b/test/Assembler/dilocalvariable-arg-large.ll
new file mode 100644
index 0000000000000..7788186a54ab3
--- /dev/null
+++ b/test/Assembler/dilocalvariable-arg-large.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
+; RUN: verify-uselistorder %s
+
+; CHECK: !named = !{!0, !1}
+!named = !{!0, !1}
+
+!0 = distinct !DISubprogram()
+
+; CHECK: !1 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "foo", arg: 65535, scope: !0)
+!1 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "foo", arg: 65535, scope: !0)
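+
+; 65535 is the largest value accepted for 'arg:'; the companion test
+; invalid-dilocalvariable-arg-large.ll below checks that 65536 is rejected.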
diff --git a/test/Assembler/mdlocalvariable.ll b/test/Assembler/dilocalvariable.ll
index 312373ca8623a..312373ca8623a 100644
--- a/test/Assembler/mdlocalvariable.ll
+++ b/test/Assembler/dilocalvariable.ll
diff --git a/test/Assembler/mdlocation.ll b/test/Assembler/dilocation.ll
index a468f8abe9c6e..a468f8abe9c6e 100644
--- a/test/Assembler/mdlocation.ll
+++ b/test/Assembler/dilocation.ll
diff --git a/test/Assembler/mdnamespace.ll b/test/Assembler/dinamespace.ll
index 5d8b6b3fa7d7a..5d8b6b3fa7d7a 100644
--- a/test/Assembler/mdnamespace.ll
+++ b/test/Assembler/dinamespace.ll
diff --git a/test/Assembler/mdobjcproperty.ll b/test/Assembler/diobjcproperty.ll
index ca13e27662d4f..ca13e27662d4f 100644
--- a/test/Assembler/mdobjcproperty.ll
+++ b/test/Assembler/diobjcproperty.ll
diff --git a/test/Assembler/mdsubprogram.ll b/test/Assembler/disubprogram.ll
index 3fa1081889ffe..3fa1081889ffe 100644
--- a/test/Assembler/mdsubprogram.ll
+++ b/test/Assembler/disubprogram.ll
diff --git a/test/Assembler/mdsubrange-empty-array.ll b/test/Assembler/disubrange-empty-array.ll
index 7b5279e3d3c2c..7b5279e3d3c2c 100644
--- a/test/Assembler/mdsubrange-empty-array.ll
+++ b/test/Assembler/disubrange-empty-array.ll
diff --git a/test/Assembler/mdsubroutinetype.ll b/test/Assembler/disubroutinetype.ll
index 4ec2be7de3b98..4ec2be7de3b98 100644
--- a/test/Assembler/mdsubroutinetype.ll
+++ b/test/Assembler/disubroutinetype.ll
diff --git a/test/Assembler/mdtemplateparameter.ll b/test/Assembler/ditemplateparameter.ll
index a356ad4e7bc94..a356ad4e7bc94 100644
--- a/test/Assembler/mdtemplateparameter.ll
+++ b/test/Assembler/ditemplateparameter.ll
diff --git a/test/Assembler/mdtype-large-values.ll b/test/Assembler/ditype-large-values.ll
index a371ac68f90a8..a371ac68f90a8 100644
--- a/test/Assembler/mdtype-large-values.ll
+++ b/test/Assembler/ditype-large-values.ll
diff --git a/test/Assembler/getelementptr.ll b/test/Assembler/getelementptr.ll
index ee88346eb00b4..0a0fddfc4dda7 100644
--- a/test/Assembler/getelementptr.ll
+++ b/test/Assembler/getelementptr.ll
@@ -19,6 +19,10 @@
@y = global i32* getelementptr ({ i32, i32 }, { i32, i32 }* @x, i16 42, i32 0)
; CHECK: @y = global i32* getelementptr ({ i32, i32 }, { i32, i32 }* @x, i16 42, i32 0)
+@PR23753_a = external global i8
+@PR23753_b = global i8* getelementptr (i8, i8* @PR23753_a, i64 ptrtoint (i8* @PR23753_a to i64))
+; CHECK: @PR23753_b = global i8* getelementptr (i8, i8* @PR23753_a, i64 ptrtoint (i8* @PR23753_a to i64))
+
; See if i92 indices work too.
define i32 *@test({i32, i32}* %t, i92 %n) {
; CHECK: @test
diff --git a/test/Assembler/invalid-mdcompileunit-language-bad.ll b/test/Assembler/invalid-dicompileunit-language-bad.ll
index e6f49f3fba476..e6f49f3fba476 100644
--- a/test/Assembler/invalid-mdcompileunit-language-bad.ll
+++ b/test/Assembler/invalid-dicompileunit-language-bad.ll
diff --git a/test/Assembler/invalid-mdcompileunit-language-overflow.ll b/test/Assembler/invalid-dicompileunit-language-overflow.ll
index c558f7aaa2587..c558f7aaa2587 100644
--- a/test/Assembler/invalid-mdcompileunit-language-overflow.ll
+++ b/test/Assembler/invalid-dicompileunit-language-overflow.ll
diff --git a/test/Assembler/invalid-mdcompileunit-missing-language.ll b/test/Assembler/invalid-dicompileunit-missing-language.ll
index 15631b7f640b2..15631b7f640b2 100644
--- a/test/Assembler/invalid-mdcompileunit-missing-language.ll
+++ b/test/Assembler/invalid-dicompileunit-missing-language.ll
diff --git a/test/Assembler/invalid-mdcompileunit-null-file.ll b/test/Assembler/invalid-dicompileunit-null-file.ll
index cc1892e914650..cc1892e914650 100644
--- a/test/Assembler/invalid-mdcompileunit-null-file.ll
+++ b/test/Assembler/invalid-dicompileunit-null-file.ll
diff --git a/test/Assembler/invalid-mdcompositetype-missing-tag.ll b/test/Assembler/invalid-dicompositetype-missing-tag.ll
index e68c01479902a..e68c01479902a 100644
--- a/test/Assembler/invalid-mdcompositetype-missing-tag.ll
+++ b/test/Assembler/invalid-dicompositetype-missing-tag.ll
diff --git a/test/Assembler/invalid-mdderivedtype-missing-basetype.ll b/test/Assembler/invalid-diderivedtype-missing-basetype.ll
index 308c2ea90ef15..308c2ea90ef15 100644
--- a/test/Assembler/invalid-mdderivedtype-missing-basetype.ll
+++ b/test/Assembler/invalid-diderivedtype-missing-basetype.ll
diff --git a/test/Assembler/invalid-mdderivedtype-missing-tag.ll b/test/Assembler/invalid-diderivedtype-missing-tag.ll
index fd286f4c29603..fd286f4c29603 100644
--- a/test/Assembler/invalid-mdderivedtype-missing-tag.ll
+++ b/test/Assembler/invalid-diderivedtype-missing-tag.ll
diff --git a/test/Assembler/invalid-mdenumerator-missing-name.ll b/test/Assembler/invalid-dienumerator-missing-name.ll
index 656d3439ba228..656d3439ba228 100644
--- a/test/Assembler/invalid-mdenumerator-missing-name.ll
+++ b/test/Assembler/invalid-dienumerator-missing-name.ll
diff --git a/test/Assembler/invalid-mdenumerator-missing-value.ll b/test/Assembler/invalid-dienumerator-missing-value.ll
index 0eee3be41b6a1..0eee3be41b6a1 100644
--- a/test/Assembler/invalid-mdenumerator-missing-value.ll
+++ b/test/Assembler/invalid-dienumerator-missing-value.ll
diff --git a/test/Assembler/invalid-mdexpression-large.ll b/test/Assembler/invalid-diexpression-large.ll
index 32b77ee513872..32b77ee513872 100644
--- a/test/Assembler/invalid-mdexpression-large.ll
+++ b/test/Assembler/invalid-diexpression-large.ll
diff --git a/test/Assembler/invalid-mdexpression-verify.ll b/test/Assembler/invalid-diexpression-verify.ll
index 50d6943dead80..50d6943dead80 100644
--- a/test/Assembler/invalid-mdexpression-verify.ll
+++ b/test/Assembler/invalid-diexpression-verify.ll
diff --git a/test/Assembler/invalid-mdfile-missing-directory.ll b/test/Assembler/invalid-difile-missing-directory.ll
index a54a22f2e189e..a54a22f2e189e 100644
--- a/test/Assembler/invalid-mdfile-missing-directory.ll
+++ b/test/Assembler/invalid-difile-missing-directory.ll
diff --git a/test/Assembler/invalid-mdfile-missing-filename.ll b/test/Assembler/invalid-difile-missing-filename.ll
index 17bf3da3f7a37..17bf3da3f7a37 100644
--- a/test/Assembler/invalid-mdfile-missing-filename.ll
+++ b/test/Assembler/invalid-difile-missing-filename.ll
diff --git a/test/Assembler/invalid-mdglobalvariable-empty-name.ll b/test/Assembler/invalid-diglobalvariable-empty-name.ll
index a4e69f3c8b751..a4e69f3c8b751 100644
--- a/test/Assembler/invalid-mdglobalvariable-empty-name.ll
+++ b/test/Assembler/invalid-diglobalvariable-empty-name.ll
diff --git a/test/Assembler/invalid-mdglobalvariable-missing-name.ll b/test/Assembler/invalid-diglobalvariable-missing-name.ll
index baf4d73d94f48..baf4d73d94f48 100644
--- a/test/Assembler/invalid-mdglobalvariable-missing-name.ll
+++ b/test/Assembler/invalid-diglobalvariable-missing-name.ll
diff --git a/test/Assembler/invalid-mdimportedentity-missing-scope.ll b/test/Assembler/invalid-diimportedentity-missing-scope.ll
index 06164e88d9b3d..06164e88d9b3d 100644
--- a/test/Assembler/invalid-mdimportedentity-missing-scope.ll
+++ b/test/Assembler/invalid-diimportedentity-missing-scope.ll
diff --git a/test/Assembler/invalid-mdimportedentity-missing-tag.ll b/test/Assembler/invalid-diimportedentity-missing-tag.ll
index 996b1ffdc9bf5..996b1ffdc9bf5 100644
--- a/test/Assembler/invalid-mdimportedentity-missing-tag.ll
+++ b/test/Assembler/invalid-diimportedentity-missing-tag.ll
diff --git a/test/Assembler/invalid-mdlexicalblock-missing-scope.ll b/test/Assembler/invalid-dilexicalblock-missing-scope.ll
index 6108e692b21cc..6108e692b21cc 100644
--- a/test/Assembler/invalid-mdlexicalblock-missing-scope.ll
+++ b/test/Assembler/invalid-dilexicalblock-missing-scope.ll
diff --git a/test/Assembler/invalid-mdlexicalblock-null-scope.ll b/test/Assembler/invalid-dilexicalblock-null-scope.ll
index 7457b6085d612..7457b6085d612 100644
--- a/test/Assembler/invalid-mdlexicalblock-null-scope.ll
+++ b/test/Assembler/invalid-dilexicalblock-null-scope.ll
diff --git a/test/Assembler/invalid-mdlexicalblockfile-missing-discriminator.ll b/test/Assembler/invalid-dilexicalblockfile-missing-discriminator.ll
index bd8395ccf4dfd..bd8395ccf4dfd 100644
--- a/test/Assembler/invalid-mdlexicalblockfile-missing-discriminator.ll
+++ b/test/Assembler/invalid-dilexicalblockfile-missing-discriminator.ll
diff --git a/test/Assembler/invalid-mdlexicalblockfile-missing-scope.ll b/test/Assembler/invalid-dilexicalblockfile-missing-scope.ll
index fd037501c40a8..fd037501c40a8 100644
--- a/test/Assembler/invalid-mdlexicalblockfile-missing-scope.ll
+++ b/test/Assembler/invalid-dilexicalblockfile-missing-scope.ll
diff --git a/test/Assembler/invalid-mdlexicalblockfile-null-scope.ll b/test/Assembler/invalid-dilexicalblockfile-null-scope.ll
index ae1a5d432e3de..ae1a5d432e3de 100644
--- a/test/Assembler/invalid-mdlexicalblockfile-null-scope.ll
+++ b/test/Assembler/invalid-dilexicalblockfile-null-scope.ll
diff --git a/test/Assembler/invalid-dilocalvariable-arg-large.ll b/test/Assembler/invalid-dilocalvariable-arg-large.ll
new file mode 100644
index 0000000000000..d62da601e13cb
--- /dev/null
+++ b/test/Assembler/invalid-dilocalvariable-arg-large.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+!0 = !DILocalVariable(tag: DW_TAG_arg_variable, scope: !{}, arg: 65535)
+
+; CHECK: <stdin>:[[@LINE+1]]:66: error: value for 'arg' too large, limit is 65535
+!1 = !DILocalVariable(tag: DW_TAG_arg_variable, scope: !{}, arg: 65536)
diff --git a/test/Assembler/invalid-dilocalvariable-arg-negative.ll b/test/Assembler/invalid-dilocalvariable-arg-negative.ll
new file mode 100644
index 0000000000000..08e370a36660c
--- /dev/null
+++ b/test/Assembler/invalid-dilocalvariable-arg-negative.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+!0 = !DILocalVariable(tag: DW_TAG_arg_variable, scope: !{}, arg: 0)
+
+; CHECK: <stdin>:[[@LINE+1]]:66: error: expected unsigned integer
+!1 = !DILocalVariable(tag: DW_TAG_arg_variable, scope: !{}, arg: -1)
diff --git a/test/Assembler/invalid-mdlocalvariable-missing-scope.ll b/test/Assembler/invalid-dilocalvariable-missing-scope.ll
index 45dcad7136099..45dcad7136099 100644
--- a/test/Assembler/invalid-mdlocalvariable-missing-scope.ll
+++ b/test/Assembler/invalid-dilocalvariable-missing-scope.ll
diff --git a/test/Assembler/invalid-mdlocalvariable-missing-tag.ll b/test/Assembler/invalid-dilocalvariable-missing-tag.ll
index 18062edf75bd3..18062edf75bd3 100644
--- a/test/Assembler/invalid-mdlocalvariable-missing-tag.ll
+++ b/test/Assembler/invalid-dilocalvariable-missing-tag.ll
diff --git a/test/Assembler/invalid-mdlocalvariable-null-scope.ll b/test/Assembler/invalid-dilocalvariable-null-scope.ll
index 859412a2568c3..859412a2568c3 100644
--- a/test/Assembler/invalid-mdlocalvariable-null-scope.ll
+++ b/test/Assembler/invalid-dilocalvariable-null-scope.ll
diff --git a/test/Assembler/invalid-mdlocation-field-bad.ll b/test/Assembler/invalid-dilocation-field-bad.ll
index e68aa45b03eb3..e68aa45b03eb3 100644
--- a/test/Assembler/invalid-mdlocation-field-bad.ll
+++ b/test/Assembler/invalid-dilocation-field-bad.ll
diff --git a/test/Assembler/invalid-mdlocation-field-twice.ll b/test/Assembler/invalid-dilocation-field-twice.ll
index d1dab68246c14..d1dab68246c14 100644
--- a/test/Assembler/invalid-mdlocation-field-twice.ll
+++ b/test/Assembler/invalid-dilocation-field-twice.ll
diff --git a/test/Assembler/invalid-mdlocation-missing-scope-2.ll b/test/Assembler/invalid-dilocation-missing-scope-2.ll
index 380e3fde94f64..380e3fde94f64 100644
--- a/test/Assembler/invalid-mdlocation-missing-scope-2.ll
+++ b/test/Assembler/invalid-dilocation-missing-scope-2.ll
diff --git a/test/Assembler/invalid-mdlocation-missing-scope.ll b/test/Assembler/invalid-dilocation-missing-scope.ll
index fb0cd866dc167..fb0cd866dc167 100644
--- a/test/Assembler/invalid-mdlocation-missing-scope.ll
+++ b/test/Assembler/invalid-dilocation-missing-scope.ll
diff --git a/test/Assembler/invalid-mdlocation-null-scope.ll b/test/Assembler/invalid-dilocation-null-scope.ll
index 38c59e15d15a4..38c59e15d15a4 100644
--- a/test/Assembler/invalid-mdlocation-null-scope.ll
+++ b/test/Assembler/invalid-dilocation-null-scope.ll
diff --git a/test/Assembler/invalid-mdlocation-overflow-column.ll b/test/Assembler/invalid-dilocation-overflow-column.ll
index 043f84d3ab21b..043f84d3ab21b 100644
--- a/test/Assembler/invalid-mdlocation-overflow-column.ll
+++ b/test/Assembler/invalid-dilocation-overflow-column.ll
diff --git a/test/Assembler/invalid-mdlocation-overflow-line.ll b/test/Assembler/invalid-dilocation-overflow-line.ll
index 7a6774231a97d..7a6774231a97d 100644
--- a/test/Assembler/invalid-mdlocation-overflow-line.ll
+++ b/test/Assembler/invalid-dilocation-overflow-line.ll
diff --git a/test/Assembler/invalid-mdnamespace-missing-namespace.ll b/test/Assembler/invalid-dinamespace-missing-namespace.ll
index c29391b125d72..c29391b125d72 100644
--- a/test/Assembler/invalid-mdnamespace-missing-namespace.ll
+++ b/test/Assembler/invalid-dinamespace-missing-namespace.ll
diff --git a/test/Assembler/invalid-mdsubrange-count-large.ll b/test/Assembler/invalid-disubrange-count-large.ll
index 003274f3a072c..003274f3a072c 100644
--- a/test/Assembler/invalid-mdsubrange-count-large.ll
+++ b/test/Assembler/invalid-disubrange-count-large.ll
diff --git a/test/Assembler/invalid-mdsubrange-count-missing.ll b/test/Assembler/invalid-disubrange-count-missing.ll
index 8fc4487117f68..8fc4487117f68 100644
--- a/test/Assembler/invalid-mdsubrange-count-missing.ll
+++ b/test/Assembler/invalid-disubrange-count-missing.ll
diff --git a/test/Assembler/invalid-mdsubrange-count-negative.ll b/test/Assembler/invalid-disubrange-count-negative.ll
index f2ad4c302cb2f..f2ad4c302cb2f 100644
--- a/test/Assembler/invalid-mdsubrange-count-negative.ll
+++ b/test/Assembler/invalid-disubrange-count-negative.ll
diff --git a/test/Assembler/invalid-mdsubrange-lowerBound-max.ll b/test/Assembler/invalid-disubrange-lowerBound-max.ll
index e163dc47804d7..e163dc47804d7 100644
--- a/test/Assembler/invalid-mdsubrange-lowerBound-max.ll
+++ b/test/Assembler/invalid-disubrange-lowerBound-max.ll
diff --git a/test/Assembler/invalid-mdsubrange-lowerBound-min.ll b/test/Assembler/invalid-disubrange-lowerBound-min.ll
index 1dc3a141a5765..1dc3a141a5765 100644
--- a/test/Assembler/invalid-mdsubrange-lowerBound-min.ll
+++ b/test/Assembler/invalid-disubrange-lowerBound-min.ll
diff --git a/test/Assembler/invalid-mdsubroutinetype-missing-types.ll b/test/Assembler/invalid-disubroutinetype-missing-types.ll
index 086e5cc593ec8..086e5cc593ec8 100644
--- a/test/Assembler/invalid-mdsubroutinetype-missing-types.ll
+++ b/test/Assembler/invalid-disubroutinetype-missing-types.ll
diff --git a/test/Assembler/invalid-mdtemplatetypeparameter-missing-type.ll b/test/Assembler/invalid-ditemplatetypeparameter-missing-type.ll
index 797abd2c3044f..797abd2c3044f 100644
--- a/test/Assembler/invalid-mdtemplatetypeparameter-missing-type.ll
+++ b/test/Assembler/invalid-ditemplatetypeparameter-missing-type.ll
diff --git a/test/Assembler/invalid-mdtemplatevalueparameter-missing-value.ll b/test/Assembler/invalid-ditemplatevalueparameter-missing-value.ll
index 883bea1c9ad2b..883bea1c9ad2b 100644
--- a/test/Assembler/invalid-mdtemplatevalueparameter-missing-value.ll
+++ b/test/Assembler/invalid-ditemplatevalueparameter-missing-value.ll
diff --git a/test/Assembler/metadata.ll b/test/Assembler/metadata.ll
index 4fb8851a9f40a..052ac1b5097e0 100644
--- a/test/Assembler/metadata.ll
+++ b/test/Assembler/metadata.ll
@@ -1,7 +1,7 @@
; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
; RUN: verify-uselistorder %s
-; CHECK: @test
+; CHECK-LABEL: @test
; CHECK: ret void, !bar !1, !foo !0
define void @test() {
add i32 2, 1, !bar !0
@@ -11,17 +11,24 @@ define void @test() {
ret void, !foo !0, !bar !1
}
-; CHECK: define void @test2() !foo !2 !baz !3
+; CHECK-LABEL: define void @test2() !foo !2 !baz !3
define void @test2() !foo !2 !baz !3 {
unreachable
}
-; CHECK: define void @test3() !bar !3
+; CHECK-LABEL: define void @test3() !bar !3
; CHECK: unreachable, !bar !4
define void @test3() !bar !3 {
unreachable, !bar !4
}
+; CHECK-LABEL: define void @test_attachment_name() {
+; CHECK: unreachable, !\342abc !4
+define void @test_attachment_name() {
+ ;; Escape the first character when printing text IR, since it's a digit
+ unreachable, !\34\32abc !4
+}
+
!0 = !DILocation(line: 662302, column: 26, scope: !1)
!1 = !DISubprogram(name: "foo")
!2 = distinct !{}
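
As background for the escaping exercised above (a sketch, not part of the patch): an attachment name that starts with a digit cannot be parsed as a bare identifier, so the assembler accepts \NN hex escapes and the printer escapes only the leading character. A minimal round-trippable example:

; \34 and \32 are hex escapes for ASCII '4' and '2', so this attaches
; metadata under the name "42abc"; when printed, only the leading digit
; needs escaping, giving !\342abc.
define void @escaped_name_example() {
  unreachable, !\34\32abc !0
}

!0 = !{}
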
diff --git a/test/Bitcode/Inputs/invalid-alias-type-mismatch.bc b/test/Bitcode/Inputs/invalid-alias-type-mismatch.bc
new file mode 100644
index 0000000000000..5c4298944803c
--- /dev/null
+++ b/test/Bitcode/Inputs/invalid-alias-type-mismatch.bc
Binary files differ
diff --git a/test/Bitcode/Inputs/invalid-metadata-not-followed-named-node.bc b/test/Bitcode/Inputs/invalid-metadata-not-followed-named-node.bc
new file mode 100644
index 0000000000000..42a2c3e65fecb
--- /dev/null
+++ b/test/Bitcode/Inputs/invalid-metadata-not-followed-named-node.bc
Binary files differ
diff --git a/test/Bitcode/Inputs/invalid-vector-length.bc b/test/Bitcode/Inputs/invalid-vector-length.bc
new file mode 100644
index 0000000000000..269df83a07ae3
--- /dev/null
+++ b/test/Bitcode/Inputs/invalid-vector-length.bc
Binary files differ
diff --git a/test/Bitcode/invalid.test b/test/Bitcode/invalid.test
index eb7f979d5745a..0aab553bb6155 100644
--- a/test/Bitcode/invalid.test
+++ b/test/Bitcode/invalid.test
@@ -187,3 +187,18 @@ RUN: not llvm-dis -disable-output %p/Inputs/invalid-array-operand-encoding.bc 2>
RUN: FileCheck --check-prefix=ARRAY-OP-ENC %s
ARRAY-OP-ENC: Array element type has to be an encoding of a type
+
+RUN: not llvm-dis -disable-output %p/Inputs/invalid-metadata-not-followed-named-node.bc 2>&1 | \
+RUN: FileCheck --check-prefix=META-NOT-FOLLOWED-BY-NAMED-META %s
+
+META-NOT-FOLLOWED-BY-NAMED-META: METADATA_NAME not followed by METADATA_NAMED_NODE
+
+RUN: not llvm-dis -disable-output %p/Inputs/invalid-vector-length.bc 2>&1 | \
+RUN: FileCheck --check-prefix=VECTOR-LENGTH %s
+
+VECTOR-LENGTH: Invalid vector length
+
+RUN: not llvm-dis -disable-output %p/Inputs/invalid-alias-type-mismatch.bc 2>&1 | \
+RUN: FileCheck --check-prefix=ALIAS-TYPE-MISMATCH %s
+
+ALIAS-TYPE-MISMATCH: Alias and aliasee types don't match
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index ff18f73643371..11228c7e88083 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -287,3 +287,43 @@ sw.bb.i.i:
%code1.i.i.phi.trans.insert = getelementptr inbounds %str1, %str1* %0, i64 0, i32 0, i32 0, i64 16
br label %sw.bb.i.i
}
+
+; CHECK-LABEL: select_and
+define i64 @select_and(i32 %v1, i32 %v2, i64 %a, i64 %b) {
+; CHECK: cmp
+; CHECK: ccmp{{.*}}, #0, ne
+; CHECK: csel{{.*}}, lt
+ %1 = icmp slt i32 %v1, %v2
+ %2 = icmp ne i32 5, %v2
+ %3 = and i1 %1, %2
+ %sel = select i1 %3, i64 %a, i64 %b
+ ret i64 %sel
+}
+
+; CHECK-LABEL: select_or
+define i64 @select_or(i32 %v1, i32 %v2, i64 %a, i64 %b) {
+; CHECK: cmp
+; CHECK: ccmp{{.*}}, #8, eq
+; CHECK: csel{{.*}}, lt
+ %1 = icmp slt i32 %v1, %v2
+ %2 = icmp ne i32 5, %v2
+ %3 = or i1 %1, %2
+ %sel = select i1 %3, i64 %a, i64 %b
+ ret i64 %sel
+}
+
+; CHECK-LABEL: select_complicated
+define i16 @select_complicated(double %v1, double %v2, i16 %a, i16 %b) {
+; CHECK: fcmp
+; CHECK: fccmp{{.*}}, #4, ne
+; CHECK: fccmp{{.*}}, #1, ne
+; CHECK: fccmp{{.*}}, #4, vc
+; CHECK: csel{{.*}}, eq
+ %1 = fcmp one double %v1, %v2
+ %2 = fcmp oeq double %v2, 13.0
+ %3 = fcmp oeq double %v1, 42.0
+ %or0 = or i1 %2, %3
+ %or1 = or i1 %1, %or0
+ %sel = select i1 %or1, i16 %a, i16 %b
+ ret i16 %sel
+}
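
As background on what these checks encode (hedged; the exact operand order is inferred from the CHECK lines and can differ between compiler versions): ccmp performs its compare only when the given condition holds on the current flags, and otherwise installs the immediate NZCV value. An and-chain is therefore lowered by making the failure case force the final condition false, and an or-chain by making the success case force it true. For @select_or above, roughly:

;   cmp  w1, #5           ; flags for %2 = (5 != %v2)
;   ccmp w0, w1, #8, eq   ; if %2 was false (eq), compare for %1 = (%v1 < %v2);
;                         ; if %2 was true, force NZCV = #8 (N set) so "lt" holds
;   csel x0, x2, x3, lt   ; pick %a or %b on the combined condition
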
diff --git a/test/CodeGen/AArch64/arm64-named-reg-alloc.ll b/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
index 0c564544a538b..5d48c17e12862 100644
--- a/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
+++ b/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
@@ -4,7 +4,7 @@
define i32 @get_stack() nounwind {
entry:
; FIXME: Include an allocatable-specific error message
-; CHECK: Invalid register name global variable
+; CHECK: Invalid register name "x5".
%sp = call i32 @llvm.read_register.i32(metadata !0)
ret i32 %sp
}
diff --git a/test/CodeGen/AArch64/arm64-named-reg-notareg.ll b/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
index 759bc15807b5e..8a5fd6f1ac8bd 100644
--- a/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
+++ b/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
@@ -3,7 +3,7 @@
define i32 @get_stack() nounwind {
entry:
-; CHECK: Invalid register name global variable
+; CHECK: Invalid register name "notareg".
%sp = call i32 @llvm.read_register.i32(metadata !0)
ret i32 %sp
}
diff --git a/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll b/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll
new file mode 100644
index 0000000000000..e83cbab140a74
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll
@@ -0,0 +1,74 @@
+; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \
+; RUN: -O1 -global-merge-group-by-use -global-merge-ignore-single-use \
+; RUN: %s -o - | FileCheck %s
+
+; Check that, at -O1, we only merge globals used in minsize functions.
+; We assume that globals of the same size aren't reordered inside a set.
+; We use -global-merge-ignore-single-use, and thus only expect one merged set.
+
+@m1 = internal global i32 0, align 4
+@n1 = internal global i32 0, align 4
+
+; CHECK-LABEL: f1:
+define void @f1(i32 %a1, i32 %a2) minsize nounwind {
+; CHECK-NEXT: adrp x8, [[SET:__MergedGlobals]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m1, align 4
+ store i32 %a2, i32* @n1, align 4
+ ret void
+}
+
+@m2 = internal global i32 0, align 4
+@n2 = internal global i32 0, align 4
+
+; CHECK-LABEL: f2:
+define void @f2(i32 %a1, i32 %a2) nounwind {
+; CHECK-NEXT: adrp x8, _m2@PAGE
+; CHECK-NEXT: adrp x9, _n2@PAGE
+; CHECK-NEXT: str w0, [x8, _m2@PAGEOFF]
+; CHECK-NEXT: str w1, [x9, _n2@PAGEOFF]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m2, align 4
+ store i32 %a2, i32* @n2, align 4
+ ret void
+}
+
+; If we have use sets partially overlapping between a minsize and a non-minsize
+; function, explicitly check that we only consider the globals used in the
+; minsize function for merging.
+
+@m3 = internal global i32 0, align 4
+@n3 = internal global i32 0, align 4
+
+; CHECK-LABEL: f3:
+define void @f3(i32 %a1, i32 %a2) minsize nounwind {
+; CHECK-NEXT: adrp x8, [[SET]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
+; CHECK-NEXT: stp w0, w1, [x8, #8]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m3, align 4
+ store i32 %a2, i32* @n3, align 4
+ ret void
+}
+
+@n4 = internal global i32 0, align 4
+
+; CHECK-LABEL: f4:
+define void @f4(i32 %a1, i32 %a2) nounwind {
+; CHECK-NEXT: adrp x8, [[SET]]@PAGE
+; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
+; CHECK-NEXT: adrp x9, _n4@PAGE
+; CHECK-NEXT: str w0, [x8, #8]
+; CHECK-NEXT: str w1, [x9, _n4@PAGEOFF]
+; CHECK-NEXT: ret
+ store i32 %a1, i32* @m3, align 4
+ store i32 %a2, i32* @n4, align 4
+ ret void
+}
+
+; CHECK-DAG: .zerofill __DATA,__bss,[[SET]],16,3
+; CHECK-DAG: .zerofill __DATA,__bss,_m2,4,2
+; CHECK-DAG: .zerofill __DATA,__bss,_n2,4,2
+; CHECK-DAG: .zerofill __DATA,__bss,_n4,4,2
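
As background (a hedged sketch of what the GlobalMerge pass does, not compiler output): small internal globals that are used together are wrapped into one anonymous struct, printed here as __MergedGlobals, so a single adrp/add pair can address all of them. Conceptually:

; before merging:
@m1 = internal global i32 0, align 4
@n1 = internal global i32 0, align 4

; after merging (name and layout are illustrative):
@_MergedGlobals = internal global <{ i32, i32 }> zeroinitializer, align 4
; ...with each use rewritten into a GEP into the struct, e.g.
;   store i32 %a1, i32* getelementptr inbounds (<{ i32, i32 }>,
;                       <{ i32, i32 }>* @_MergedGlobals, i32 0, i32 0)
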
diff --git a/test/CodeGen/AArch64/minmax.ll b/test/CodeGen/AArch64/minmax.ll
index a6b5adebe107a..df4912ca1f7ab 100644
--- a/test/CodeGen/AArch64/minmax.ll
+++ b/test/CodeGen/AArch64/minmax.ll
@@ -94,3 +94,14 @@ define <16 x i32> @t11(<16 x i32> %a, <16 x i32> %b) {
%t2 = select <16 x i1> %t1, <16 x i32> %a, <16 x i32> %b
ret <16 x i32> %t2
}
+
+; CHECK-LABEL: t12
+; CHECK-NOT: umin
+; The icmp is used by two instructions, so don't produce a umin node.
+define <16 x i8> @t12(<16 x i8> %a, <16 x i8> %b) {
+ %t1 = icmp ugt <16 x i8> %b, %a
+ %t2 = select <16 x i1> %t1, <16 x i8> %a, <16 x i8> %b
+ %t3 = zext <16 x i1> %t1 to <16 x i8>
+ %t4 = add <16 x i8> %t3, %t2
+ ret <16 x i8> %t4
+}
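
For contrast with t12 (a hedged sketch, not part of the patch): when the compare's only user is the select, the idiom does match and a single vector umin can be emitted. This is the positive case t12 guards against losing:

; the icmp feeds only the select, so this may lower to "umin.16b"
define <16 x i8> @umin_single_use(<16 x i8> %a, <16 x i8> %b) {
  %c = icmp ult <16 x i8> %a, %b
  %m = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %b
  ret <16 x i8> %m
}
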
diff --git a/test/CodeGen/AArch64/special-reg.ll b/test/CodeGen/AArch64/special-reg.ll
new file mode 100644
index 0000000000000..91c32158d420d
--- /dev/null
+++ b/test/CodeGen/AArch64/special-reg.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -mtriple=aarch64-none-eabi -mcpu=cortex-a57 2>&1 | FileCheck %s
+
+define i64 @read_encoded_register() nounwind {
+entry:
+; CHECK-LABEL: read_encoded_register:
+; CHECK: mrs x0, S1_2_C3_C4_5
+ %reg = call i64 @llvm.read_register.i64(metadata !0)
+ ret i64 %reg
+}
+
+define i64 @read_daif() nounwind {
+entry:
+; CHECK-LABEL: read_daif:
+; CHECK: mrs x0, DAIF
+ %reg = call i64 @llvm.read_register.i64(metadata !1)
+ ret i64 %reg
+}
+
+define void @write_encoded_register(i64 %x) nounwind {
+entry:
+; CHECK-LABEL: write_encoded_register:
+; CHECK: msr S1_2_C3_C4_5, x0
+ call void @llvm.write_register.i64(metadata !0, i64 %x)
+ ret void
+}
+
+define void @write_daif(i64 %x) nounwind {
+entry:
+; CHECK-LABEL: write_daif:
+; CHECK: msr DAIF, x0
+ call void @llvm.write_register.i64(metadata !1, i64 %x)
+ ret void
+}
+
+define void @write_daifset() nounwind {
+entry:
+; CHECK-LABEL: write_daifset:
+; CHECK: msr DAIFSET, #2
+ call void @llvm.write_register.i64(metadata !2, i64 2)
+ ret void
+}
+
+declare i64 @llvm.read_register.i64(metadata) nounwind
+declare void @llvm.write_register.i64(metadata, i64) nounwind
+
+!0 = !{!"1:2:3:4:5"}
+!1 = !{!"daif"}
+!2 = !{!"daifset"}
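
As background on the string form (an illustrative sketch; the register choice is an assumption, not from the patch): the metadata either names a register symbolically, as with daif, or gives the op0:op1:CRn:CRm:op2 system-register encoding, which the backend prints back as S<op0>_<op1>_C<CRn>_C<CRm>_<op2>. For example:

declare i64 @llvm.read_register.i64(metadata)

; "3:0:0:0:0" is the MIDR_EL1 encoding; expect something like
;   mrs x0, S3_0_C0_C0_0
define i64 @read_midr() nounwind {
  %v = call i64 @llvm.read_register.i64(metadata !0)
  ret i64 %v
}

!0 = !{!"3:0:0:0:0"}
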
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index db5007b0758d4..86287c1178dbe 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -664,7 +664,7 @@ define void @test_atomic_load_min_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
@@ -782,7 +782,7 @@ define void @test_atomic_load_max_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK: ldrexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
@@ -900,7 +900,7 @@ define void @test_atomic_load_umin_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
@@ -1018,7 +1018,7 @@ define void @test_atomic_load_umax_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
@@ -1146,10 +1146,12 @@ define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; function there.
; CHECK-LE-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
; CHECK-LE-DAG: eor{{(\.w)?}} [[MISMATCH_HI:r[0-9]+|lr]], [[OLD2]], r1
-; CHECK-LE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_LO]], [[MISMATCH_HI]]
+; CHECK-ARM-LE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_LO]], [[MISMATCH_HI]]
+; CHECK-THUMB-LE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-BE-DAG: eor{{(\.w)?}} [[MISMATCH_HI:r[0-9]+|lr]], [[OLD2]], r1
; CHECK-BE-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
-; CHECK-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]]
+; CHECK-ARM-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]]
+; CHECK-THUMB-BE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_LO]], [[MISMATCH_HI]]
; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r2, r3 is a reasonable guess.
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
index 1982fa98ef41f..e9de52a3e1a00 100644
--- a/test/CodeGen/ARM/build-attributes.ll
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -923,7 +923,7 @@
; CORTEX-M4-SOFT: .eabi_attribute 7, 77
; CORTEX-M4-SOFT: .eabi_attribute 8, 0
; CORTEX-M4-SOFT: .eabi_attribute 9, 2
-; CORTEX-M4-SOFT: .fpu vfpv4-d16
+; CORTEX-M4-SOFT: .fpu fpv4-sp-d16
; CORTEX-M4-SOFT-NOT: .eabi_attribute 19
;; We default to IEEE 754 compliance
; CORTEX-M4-SOFT: .eabi_attribute 20, 1
@@ -953,7 +953,7 @@
; CORTEX-M4-HARD: .eabi_attribute 7, 77
; CORTEX-M4-HARD: .eabi_attribute 8, 0
; CORTEX-M4-HARD: .eabi_attribute 9, 2
-; CORTEX-M4-HARD: .fpu vfpv4-d16
+; CORTEX-M4-HARD: .fpu fpv4-sp-d16
; CORTEX-M4-HARD-NOT: .eabi_attribute 19
;; We default to IEEE 754 compliance
; CORTEX-M4-HARD: .eabi_attribute 20, 1
@@ -984,7 +984,7 @@
; CORTEX-M7: .eabi_attribute 8, 0
; CORTEX-M7: .eabi_attribute 9, 2
; CORTEX-M7-SOFT-NOT: .fpu
-; CORTEX-M7-SINGLE: .fpu fpv5-d16
+; CORTEX-M7-SINGLE: .fpu fpv5-sp-d16
; CORTEX-M7-DOUBLE: .fpu fpv5-d16
; CORTEX-M7: .eabi_attribute 17, 1
; CORTEX-M7-NOT: .eabi_attribute 19
diff --git a/test/CodeGen/ARM/ifcvt-callback.ll b/test/CodeGen/ARM/ifcvt-callback.ll
new file mode 100644
index 0000000000000..62a66e745b395
--- /dev/null
+++ b/test/CodeGen/ARM/ifcvt-callback.ll
@@ -0,0 +1,22 @@
+; RUN: llc -march thumb %s -o - | FileCheck %s
+
+; This test checks that the if-conversion pass is unconditionally added to the
+; pass pipeline and is conditionally executed based on the per-function
+; target-cpu attribute.
+
+; CHECK: ite eq
+
+define i32 @test_ifcvt(i32 %a, i32 %b) #0 {
+ %tmp2 = icmp eq i32 %a, 0
+ br i1 %tmp2, label %cond_false, label %cond_true
+
+cond_true:
+ %tmp5 = add i32 %b, 1
+ ret i32 %tmp5
+
+cond_false:
+ %tmp7 = add i32 %b, -1
+ ret i32 %tmp7
+}
+
+attributes #0 = { "target-cpu"="cortex-a8" }
diff --git a/test/CodeGen/ARM/jump-table-islands-split.ll b/test/CodeGen/ARM/jump-table-islands-split.ll
new file mode 100644
index 0000000000000..deba21b4dbb1a
--- /dev/null
+++ b/test/CodeGen/ARM/jump-table-islands-split.ll
@@ -0,0 +1,52 @@
+; RUN: llc -mtriple=thumbv7s-apple-ios8.0 -o - %s | FileCheck %s
+
+declare void @foo(double)
+declare i32 @llvm.arm.space(i32, i32)
+
+; The constpool entry used to call @foo should be directly between where we want
+; the tbb and its table. Fortunately, the flow is simple enough that we can
+; eliminate the entry calculation (ADD) and use the ADR as the base.
+;
+; I'm hoping this won't be fragile, but if it does break the most likely fix is
+; adjusting the @llvm.arm.space call slightly. If this happens too many times
+; the test should probably be removed.
+define i32 @test_jumptable_not_adjacent(i1 %tst, i32 %sw, i32 %l) {
+; CHECK-LABEL: test_jumptable_not_adjacent:
+; CHECK: vldr {{d[0-9]+}}, [[DBL_CONST:LCPI[0-9]+_[0-9]+]]
+; [...]
+; CHECK: adr.w r[[BASE:[0-9]+]], [[JUMP_TABLE:LJTI[0-9]+_[0-9]+]]
+; CHECK-NOT: r[[BASE]]
+
+; CHECK: [[TBB_KEY:LCPI[0-9]+_[0-9]+]]:
+; CHECK-NEXT: tbb [r[[BASE]], {{r[0-9]+}}]
+
+; CHECK: [[DBL_CONST]]:
+; CHECK: .long
+; CHECK: .long
+; CHECK: [[JUMP_TABLE]]:
+; CHECK: .byte (LBB{{[0-9]+}}_{{[0-9]+}}-([[TBB_KEY]]+4)
+
+ br label %complex
+
+complex:
+ call void @foo(double 12345.0)
+ call i32 @llvm.arm.space(i32 970, i32 undef)
+ switch i32 %sw, label %second [ i32 0, label %other
+ i32 1, label %third
+ i32 2, label %end
+ i32 3, label %other ]
+
+second:
+ ret i32 43
+third:
+ ret i32 0
+
+other:
+ call void @bar()
+ unreachable
+
+end:
+ ret i32 42
+}
+
+declare void @bar()
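
A note on the padding trick used above (background; treat the exact byte count as approximate): llvm.arm.space(N, V) is a testing aid that reserves about N bytes of instruction space while producing the value V, letting a test steer branch-range and constant-island placement without hundreds of dummy instructions. Minimal usage:

declare i32 @llvm.arm.space(i32, i32)

define i32 @pad(i32 %x) {
  ; reserve ~1KB of code so addresses after this point fall out of short range
  %v = call i32 @llvm.arm.space(i32 1024, i32 undef)
  %r = add i32 %v, %x
  ret i32 %r
}
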
diff --git a/test/CodeGen/ARM/jump-table-islands.ll b/test/CodeGen/ARM/jump-table-islands.ll
new file mode 100644
index 0000000000000..6b4f174c09288
--- /dev/null
+++ b/test/CodeGen/ARM/jump-table-islands.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mtriple=armv7-apple-ios8.0 -o - %s | FileCheck %s
+
+%BigInt = type i5500
+
+define %BigInt @test_moved_jumptable(i1 %tst, i32 %sw, %BigInt %l) {
+; CHECK-LABEL: test_moved_jumptable:
+
+; CHECK: adr {{r[0-9]+}}, [[JUMP_TABLE:LJTI[0-9]+_[0-9]+]]
+; CHECK: b [[SKIP_TABLE:LBB[0-9]+_[0-9]+]]
+
+; CHECK: [[JUMP_TABLE]]:
+; CHECK: .data_region jt32
+; CHECK: .long LBB{{[0-9]+_[0-9]+}}-[[JUMP_TABLE]]
+
+; CHECK: [[SKIP_TABLE]]:
+; CHECK: add pc, {{r[0-9]+}}, {{r[0-9]+}}
+ br i1 %tst, label %simple, label %complex
+
+simple:
+ br label %end
+
+complex:
+ switch i32 %sw, label %simple [ i32 0, label %other
+ i32 1, label %third
+ i32 5, label %end
+ i32 6, label %other ]
+
+third:
+ ret %BigInt 0
+
+other:
+ call void @bar()
+ unreachable
+
+end:
+ %val = phi %BigInt [ %l, %complex ], [ -1, %simple ]
+ ret %BigInt %val
+}
+
+declare void @bar()
diff --git a/test/CodeGen/ARM/jumptable-label.ll b/test/CodeGen/ARM/jumptable-label.ll
index 49d698672f824..2ba90dc973659 100644
--- a/test/CodeGen/ARM/jumptable-label.ll
+++ b/test/CodeGen/ARM/jumptable-label.ll
@@ -2,8 +2,8 @@
; test that we print the label of a bb that is only used in a jump table.
-; CHECK: .long LBB0_2
-; CHECK: LBB0_2:
+; CHECK: .long [[JUMPTABLE_DEST:LBB[0-9]+_[0-9]+]]
+; CHECK: [[JUMPTABLE_DEST]]:
define i32 @calculate() {
entry:
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index 7ce846844e051..a8070ea68aa2b 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -92,6 +92,22 @@ entry:
ret void
}
+declare void @extfunc(i32, i32, i32, i32)
+
+; CHECK-LABEL: Func2:
+; A8: ldrd
+; A8: blx
+; A8: pop
+define void @Func2(i32* %p) {
+entry:
+ %addr0 = getelementptr i32, i32* %p, i32 0
+ %addr1 = getelementptr i32, i32* %p, i32 1
+ %v0 = load i32, i32* %addr0
+ %v1 = load i32, i32* %addr1
+ ; try to force %v0/%v1 into non-adjacent registers
+ call void @extfunc(i32 %v0, i32 0, i32 0, i32 %v1)
+ ret void
+}
declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/ARM/named-reg-alloc.ll b/test/CodeGen/ARM/named-reg-alloc.ll
index 380cf39734ff1..d41fa64882c88 100644
--- a/test/CodeGen/ARM/named-reg-alloc.ll
+++ b/test/CodeGen/ARM/named-reg-alloc.ll
@@ -4,7 +4,7 @@
define i32 @get_stack() nounwind {
entry:
; FIXME: Include an allocatable-specific error message
-; CHECK: Invalid register name global variable
+; CHECK: Invalid register name "r5".
%sp = call i32 @llvm.read_register.i32(metadata !0)
ret i32 %sp
}
diff --git a/test/CodeGen/ARM/named-reg-notareg.ll b/test/CodeGen/ARM/named-reg-notareg.ll
index 3ac03f4fdaaa9..45cb38f30f355 100644
--- a/test/CodeGen/ARM/named-reg-notareg.ll
+++ b/test/CodeGen/ARM/named-reg-notareg.ll
@@ -3,7 +3,7 @@
define i32 @get_stack() nounwind {
entry:
-; CHECK: Invalid register name global variable
+; CHECK: Invalid register name "notareg".
%sp = call i32 @llvm.read_register.i32(metadata !0)
ret i32 %sp
}
diff --git a/test/CodeGen/ARM/special-reg-acore.ll b/test/CodeGen/ARM/special-reg-acore.ll
new file mode 100644
index 0000000000000..3d65ff44bfb00
--- /dev/null
+++ b/test/CodeGen/ARM/special-reg-acore.ll
@@ -0,0 +1,78 @@
+; RUN: llc < %s -mtriple=arm-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck %s --check-prefix=ACORE
+; RUN: not llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m4 2>&1 | FileCheck %s --check-prefix=MCORE
+
+; MCORE: LLVM ERROR: Invalid register name "cpsr".
+
+define i32 @read_cpsr() nounwind {
+ ; ACORE-LABEL: read_cpsr:
+ ; ACORE: mrs r0, apsr
+ %reg = call i32 @llvm.read_register.i32(metadata !1)
+ ret i32 %reg
+}
+
+define i32 @read_aclass_registers() nounwind {
+entry:
+ ; ACORE-LABEL: read_aclass_registers:
+ ; ACORE: mrs r0, apsr
+ ; ACORE: mrs r1, spsr
+
+ %0 = call i32 @llvm.read_register.i32(metadata !0)
+ %1 = call i32 @llvm.read_register.i32(metadata !1)
+ %add1 = add i32 %1, %0
+ %2 = call i32 @llvm.read_register.i32(metadata !2)
+ %add2 = add i32 %add1, %2
+ ret i32 %add2
+}
+
+define void @write_aclass_registers(i32 %x) nounwind {
+entry:
+ ; ACORE-LABEL: write_aclass_registers:
+ ; ACORE: msr APSR_nzcvq, r0
+ ; ACORE: msr APSR_g, r0
+ ; ACORE: msr APSR_nzcvqg, r0
+ ; ACORE: msr CPSR_c, r0
+ ; ACORE: msr CPSR_x, r0
+ ; ACORE: msr APSR_g, r0
+ ; ACORE: msr APSR_nzcvq, r0
+ ; ACORE: msr CPSR_fsxc, r0
+ ; ACORE: msr SPSR_c, r0
+ ; ACORE: msr SPSR_x, r0
+ ; ACORE: msr SPSR_s, r0
+ ; ACORE: msr SPSR_f, r0
+ ; ACORE: msr SPSR_fsxc, r0
+
+ call void @llvm.write_register.i32(metadata !3, i32 %x)
+ call void @llvm.write_register.i32(metadata !4, i32 %x)
+ call void @llvm.write_register.i32(metadata !5, i32 %x)
+ call void @llvm.write_register.i32(metadata !6, i32 %x)
+ call void @llvm.write_register.i32(metadata !7, i32 %x)
+ call void @llvm.write_register.i32(metadata !8, i32 %x)
+ call void @llvm.write_register.i32(metadata !9, i32 %x)
+ call void @llvm.write_register.i32(metadata !10, i32 %x)
+ call void @llvm.write_register.i32(metadata !11, i32 %x)
+ call void @llvm.write_register.i32(metadata !12, i32 %x)
+ call void @llvm.write_register.i32(metadata !13, i32 %x)
+ call void @llvm.write_register.i32(metadata !14, i32 %x)
+ call void @llvm.write_register.i32(metadata !15, i32 %x)
+ ret void
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+declare void @llvm.write_register.i32(metadata, i32) nounwind
+
+!0 = !{!"apsr"}
+!1 = !{!"cpsr"}
+!2 = !{!"spsr"}
+!3 = !{!"apsr_nzcvq"}
+!4 = !{!"apsr_g"}
+!5 = !{!"apsr_nzcvqg"}
+!6 = !{!"cpsr_c"}
+!7 = !{!"cpsr_x"}
+!8 = !{!"cpsr_s"}
+!9 = !{!"cpsr_f"}
+!10 = !{!"cpsr_cxsf"}
+!11 = !{!"spsr_c"}
+!12 = !{!"spsr_x"}
+!13 = !{!"spsr_s"}
+!14 = !{!"spsr_f"}
+!15 = !{!"spsr_cxsf"}
diff --git a/test/CodeGen/ARM/special-reg-mcore.ll b/test/CodeGen/ARM/special-reg-mcore.ll
new file mode 100644
index 0000000000000..686da0f6b8397
--- /dev/null
+++ b/test/CodeGen/ARM/special-reg-mcore.ll
@@ -0,0 +1,143 @@
+; RUN: llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m4 2>&1 | FileCheck %s --check-prefix=MCORE
+; RUN: not llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m3 2>&1 | FileCheck %s --check-prefix=M3CORE
+; RUN: not llc < %s -mtriple=arm-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck %s --check-prefix=ACORE
+
+; ACORE: LLVM ERROR: Invalid register name "control".
+; M3CORE: LLVM ERROR: Invalid register name "control".
+
+define i32 @read_mclass_registers() nounwind {
+entry:
+ ; MCORE-LABEL: read_mclass_registers:
+ ; MCORE: mrs r0, apsr
+ ; MCORE: mrs r1, iapsr
+ ; MCORE: mrs r1, eapsr
+ ; MCORE: mrs r1, xpsr
+ ; MCORE: mrs r1, ipsr
+ ; MCORE: mrs r1, epsr
+ ; MCORE: mrs r1, iepsr
+ ; MCORE: mrs r1, msp
+ ; MCORE: mrs r1, psp
+ ; MCORE: mrs r1, primask
+ ; MCORE: mrs r1, basepri
+ ; MCORE: mrs r1, basepri_max
+ ; MCORE: mrs r1, faultmask
+ ; MCORE: mrs r1, control
+
+ %0 = call i32 @llvm.read_register.i32(metadata !0)
+ %1 = call i32 @llvm.read_register.i32(metadata !4)
+ %add1 = add i32 %1, %0
+ %2 = call i32 @llvm.read_register.i32(metadata !8)
+ %add2 = add i32 %add1, %2
+ %3 = call i32 @llvm.read_register.i32(metadata !12)
+ %add3 = add i32 %add2, %3
+ %4 = call i32 @llvm.read_register.i32(metadata !16)
+ %add4 = add i32 %add3, %4
+ %5 = call i32 @llvm.read_register.i32(metadata !17)
+ %add5 = add i32 %add4, %5
+ %6 = call i32 @llvm.read_register.i32(metadata !18)
+ %add6 = add i32 %add5, %6
+ %7 = call i32 @llvm.read_register.i32(metadata !19)
+ %add7 = add i32 %add6, %7
+ %8 = call i32 @llvm.read_register.i32(metadata !20)
+ %add8 = add i32 %add7, %8
+ %9 = call i32 @llvm.read_register.i32(metadata !21)
+ %add9 = add i32 %add8, %9
+ %10 = call i32 @llvm.read_register.i32(metadata !22)
+ %add10 = add i32 %add9, %10
+ %11 = call i32 @llvm.read_register.i32(metadata !23)
+ %add11 = add i32 %add10, %11
+ %12 = call i32 @llvm.read_register.i32(metadata !24)
+ %add12 = add i32 %add11, %12
+ %13 = call i32 @llvm.read_register.i32(metadata !25)
+ %add13 = add i32 %add12, %13
+ ret i32 %add13
+}
+
+define void @write_mclass_registers(i32 %x) nounwind {
+entry:
+ ; MCORE-LABEL: write_mclass_registers:
+ ; MCORE: msr apsr_nzcvqg, r0
+ ; MCORE: msr apsr_nzcvq, r0
+ ; MCORE: msr apsr_g, r0
+ ; MCORE: msr apsr_nzcvqg, r0
+ ; MCORE: msr iapsr_nzcvqg, r0
+ ; MCORE: msr iapsr_nzcvq, r0
+ ; MCORE: msr iapsr_g, r0
+ ; MCORE: msr iapsr_nzcvqg, r0
+ ; MCORE: msr eapsr_nzcvqg, r0
+ ; MCORE: msr eapsr_nzcvq, r0
+ ; MCORE: msr eapsr_g, r0
+ ; MCORE: msr eapsr_nzcvqg, r0
+ ; MCORE: msr xpsr_nzcvqg, r0
+ ; MCORE: msr xpsr_nzcvq, r0
+ ; MCORE: msr xpsr_g, r0
+ ; MCORE: msr xpsr_nzcvqg, r0
+ ; MCORE: msr ipsr, r0
+ ; MCORE: msr epsr, r0
+ ; MCORE: msr iepsr, r0
+ ; MCORE: msr msp, r0
+ ; MCORE: msr psp, r0
+ ; MCORE: msr primask, r0
+ ; MCORE: msr basepri, r0
+ ; MCORE: msr basepri_max, r0
+ ; MCORE: msr faultmask, r0
+ ; MCORE: msr control, r0
+
+ call void @llvm.write_register.i32(metadata !0, i32 %x)
+ call void @llvm.write_register.i32(metadata !1, i32 %x)
+ call void @llvm.write_register.i32(metadata !2, i32 %x)
+ call void @llvm.write_register.i32(metadata !3, i32 %x)
+ call void @llvm.write_register.i32(metadata !4, i32 %x)
+ call void @llvm.write_register.i32(metadata !5, i32 %x)
+ call void @llvm.write_register.i32(metadata !6, i32 %x)
+ call void @llvm.write_register.i32(metadata !7, i32 %x)
+ call void @llvm.write_register.i32(metadata !8, i32 %x)
+ call void @llvm.write_register.i32(metadata !9, i32 %x)
+ call void @llvm.write_register.i32(metadata !10, i32 %x)
+ call void @llvm.write_register.i32(metadata !11, i32 %x)
+ call void @llvm.write_register.i32(metadata !12, i32 %x)
+ call void @llvm.write_register.i32(metadata !13, i32 %x)
+ call void @llvm.write_register.i32(metadata !14, i32 %x)
+ call void @llvm.write_register.i32(metadata !15, i32 %x)
+ call void @llvm.write_register.i32(metadata !16, i32 %x)
+ call void @llvm.write_register.i32(metadata !17, i32 %x)
+ call void @llvm.write_register.i32(metadata !18, i32 %x)
+ call void @llvm.write_register.i32(metadata !19, i32 %x)
+ call void @llvm.write_register.i32(metadata !20, i32 %x)
+ call void @llvm.write_register.i32(metadata !21, i32 %x)
+ call void @llvm.write_register.i32(metadata !22, i32 %x)
+ call void @llvm.write_register.i32(metadata !23, i32 %x)
+ call void @llvm.write_register.i32(metadata !24, i32 %x)
+ call void @llvm.write_register.i32(metadata !25, i32 %x)
+ ret void
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+declare void @llvm.write_register.i32(metadata, i32) nounwind
+
+!0 = !{!"apsr"}
+!1 = !{!"apsr_nzcvq"}
+!2 = !{!"apsr_g"}
+!3 = !{!"apsr_nzcvqg"}
+!4 = !{!"iapsr"}
+!5 = !{!"iapsr_nzcvq"}
+!6 = !{!"iapsr_g"}
+!7 = !{!"iapsr_nzcvqg"}
+!8 = !{!"eapsr"}
+!9 = !{!"eapsr_nzcvq"}
+!10 = !{!"eapsr_g"}
+!11 = !{!"eapsr_nzcvqg"}
+!12 = !{!"xpsr"}
+!13 = !{!"xpsr_nzcvq"}
+!14 = !{!"xpsr_g"}
+!15 = !{!"xpsr_nzcvqg"}
+!16 = !{!"ipsr"}
+!17 = !{!"epsr"}
+!18 = !{!"iepsr"}
+!19 = !{!"msp"}
+!20 = !{!"psp"}
+!21 = !{!"primask"}
+!22 = !{!"basepri"}
+!23 = !{!"basepri_max"}
+!24 = !{!"faultmask"}
+!25 = !{!"control"}
diff --git a/test/CodeGen/ARM/special-reg.ll b/test/CodeGen/ARM/special-reg.ll
new file mode 100644
index 0000000000000..7ccb490f5d4a6
--- /dev/null
+++ b/test/CodeGen/ARM/special-reg.ll
@@ -0,0 +1,78 @@
+; RUN: llc < %s -mtriple=arm-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck %s --check-prefix=ARM --check-prefix=ACORE
+; RUN: llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m4 2>&1 | FileCheck %s --check-prefix=ARM --check-prefix=MCORE
+
+define i32 @read_i32_encoded_register() nounwind {
+entry:
+; ARM-LABEL: read_i32_encoded_register:
+; ARM: mrc p1, #2, r0, c3, c4, #5
+ %reg = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %reg
+}
+
+define i64 @read_i64_encoded_register() nounwind {
+entry:
+; ARM-LABEL: read_i64_encoded_register:
+; ARM: mrrc p1, #2, r0, r1, c3
+ %reg = call i64 @llvm.read_register.i64(metadata !1)
+ ret i64 %reg
+}
+
+define i32 @read_apsr() nounwind {
+entry:
+; ARM-LABEL: read_apsr:
+; ARM: mrs r0, apsr
+ %reg = call i32 @llvm.read_register.i32(metadata !2)
+ ret i32 %reg
+}
+
+define i32 @read_fpscr() nounwind {
+entry:
+; ARM-LABEL: read_fpscr:
+; ARM: vmrs r0, fpscr
+ %reg = call i32 @llvm.read_register.i32(metadata !3)
+ ret i32 %reg
+}
+
+define void @write_i32_encoded_register(i32 %x) nounwind {
+entry:
+; ARM-LABEL: write_i32_encoded_register:
+; ARM: mcr p1, #2, r0, c3, c4, #5
+ call void @llvm.write_register.i32(metadata !0, i32 %x)
+ ret void
+}
+
+define void @write_i64_encoded_register(i64 %x) nounwind {
+entry:
+; ARM-LABEL: write_i64_encoded_register:
+; ARM: mcrr p1, #2, r0, r1, c3
+ call void @llvm.write_register.i64(metadata !1, i64 %x)
+ ret void
+}
+
+define void @write_apsr(i32 %x) nounwind {
+entry:
+; ARM-LABEL: write_apsr:
+; ACORE: msr APSR_nzcvq, r0
+; MCORE: msr apsr_nzcvq, r0
+ call void @llvm.write_register.i32(metadata !4, i32 %x)
+ ret void
+}
+
+define void @write_fpscr(i32 %x) nounwind {
+entry:
+; ARM-LABEL: write_fpscr:
+; ARM: vmsr fpscr, r0
+ call void @llvm.write_register.i32(metadata !3, i32 %x)
+ ret void
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+declare i64 @llvm.read_register.i64(metadata) nounwind
+declare void @llvm.write_register.i32(metadata, i32) nounwind
+declare void @llvm.write_register.i64(metadata, i64) nounwind
+
+!0 = !{!"cp1:2:c3:c4:5"}
+!1 = !{!"cp1:2:c3"}
+!2 = !{!"apsr"}
+!3 = !{!"fpscr"}
+!4 = !{!"apsr_nzcvq"}
diff --git a/test/CodeGen/BPF/alu8.ll b/test/CodeGen/BPF/alu8.ll
index 0233225f81b58..c1c2bd29f2472 100644
--- a/test/CodeGen/BPF/alu8.ll
+++ b/test/CodeGen/BPF/alu8.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=bpf -show-mc-encoding < %s | FileCheck %s
-; test little endian only for now
+; RUN: llc -march=bpfel -show-mc-encoding < %s | FileCheck %s
define i8 @mov(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: mov:
diff --git a/test/CodeGen/BPF/atomics.ll b/test/CodeGen/BPF/atomics.ll
index 2f9730ddddef4..a2e17d291968a 100644
--- a/test/CodeGen/BPF/atomics.ll
+++ b/test/CodeGen/BPF/atomics.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=bpf -verify-machineinstrs -show-mc-encoding | FileCheck %s
-; test little endian only for now
+; RUN: llc < %s -march=bpfel -verify-machineinstrs -show-mc-encoding | FileCheck %s
; CHECK-LABEL: test_load_add_32
; CHECK: xadd32
diff --git a/test/CodeGen/BPF/basictest.ll b/test/CodeGen/BPF/basictest.ll
index 2a2d49878a633..82feb43d005cf 100644
--- a/test/CodeGen/BPF/basictest.ll
+++ b/test/CodeGen/BPF/basictest.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: llc < %s -march=bpfel | FileCheck %s
define i32 @test0(i32 %X) {
%tmp.1 = add i32 %X, 1
diff --git a/test/CodeGen/BPF/cc_args.ll b/test/CodeGen/BPF/cc_args.ll
index 5085fe5684eb3..8e3f8604ac881 100644
--- a/test/CodeGen/BPF/cc_args.ll
+++ b/test/CodeGen/BPF/cc_args.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
-; test little endian only for now
+; RUN: llc < %s -march=bpfel -show-mc-encoding | FileCheck %s
define void @test() #0 {
entry:
diff --git a/test/CodeGen/BPF/cc_args_be.ll b/test/CodeGen/BPF/cc_args_be.ll
new file mode 100644
index 0000000000000..59a7439728ba8
--- /dev/null
+++ b/test/CodeGen/BPF/cc_args_be.ll
@@ -0,0 +1,96 @@
+; RUN: llc < %s -march=bpfeb -show-mc-encoding | FileCheck %s
+; test big endian
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: mov r1, 123 # encoding: [0xb7,0x10,0x00,0x00,0x00,0x00,0x00,0x7b]
+; CHECK: call f_i16
+ call void @f_i16(i16 123)
+
+; CHECK: mov r1, 12345678 # encoding: [0xb7,0x10,0x00,0x00,0x00,0xbc,0x61,0x4e]
+; CHECK: call f_i32
+ call void @f_i32(i32 12345678)
+
+; CHECK: ld_64 r1, 72623859790382856 # encoding: [0x18,0x10,0x00,0x00,0x05,0x06,0x07,0x08,0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04]
+; CHECK: call f_i64
+ call void @f_i64(i64 72623859790382856)
+
+; CHECK: mov r1, 1234
+; CHECK: mov r2, 5678
+; CHECK: call f_i32_i32
+ call void @f_i32_i32(i32 1234, i32 5678)
+
+; CHECK: mov r1, 2
+; CHECK: mov r2, 3
+; CHECK: mov r3, 4
+; CHECK: call f_i16_i32_i16
+ call void @f_i16_i32_i16(i16 2, i32 3, i16 4)
+
+; CHECK: mov r1, 5
+; CHECK: ld_64 r2, 7262385979038285
+; CHECK: mov r3, 6
+; CHECK: call f_i16_i64_i16
+ call void @f_i16_i64_i16(i16 5, i64 7262385979038285, i16 6)
+
+ ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 4
+
+define void @f_i16(i16 %a) #0 {
+; CHECK: f_i16:
+; CHECK: sth 0(r2), r1 # encoding: [0x6b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i16 %a, i16* @g_i16, align 2
+ ret void
+}
+
+define void @f_i32(i32 %a) #0 {
+; CHECK: f_i32:
+; CHECK: sth 2(r2), r1 # encoding: [0x6b,0x21,0x00,0x02,0x00,0x00,0x00,0x00]
+; CHECK: sth 0(r2), r1 # encoding: [0x6b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i32 %a, i32* @g_i32, align 2
+ ret void
+}
+
+define void @f_i64(i64 %a) #0 {
+; CHECK: f_i64:
+; CHECK: stw 4(r2), r1 # encoding: [0x63,0x21,0x00,0x04,0x00,0x00,0x00,0x00]
+; CHECK: stw 0(r2), r1
+ store volatile i64 %a, i64* @g_i64, align 2
+ ret void
+}
+
+define void @f_i32_i32(i32 %a, i32 %b) #0 {
+; CHECK: f_i32_i32:
+; CHECK: stw 0(r3), r1
+ store volatile i32 %a, i32* @g_i32, align 4
+; CHECK: stw 0(r3), r2
+ store volatile i32 %b, i32* @g_i32, align 4
+ ret void
+}
+
+define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
+; CHECK: f_i16_i32_i16:
+; CHECK: sth 0(r4), r1
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: stw 0(r1), r2
+ store volatile i32 %b, i32* @g_i32, align 4
+; CHECK: sth 0(r4), r3
+ store volatile i16 %c, i16* @g_i16, align 2
+ ret void
+}
+
+define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
+; CHECK: f_i16_i64_i16:
+; CHECK: sth 0(r4), r1
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: std 0(r1), r2 # encoding: [0x7b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i64 %b, i64* @g_i64, align 8
+; CHECK: sth 0(r4), r3
+ store volatile i16 %c, i16* @g_i16, align 2
+ ret void
+}
diff --git a/test/CodeGen/BPF/cc_ret.ll b/test/CodeGen/BPF/cc_ret.ll
index e32b17bcc61c3..09574922f325b 100644
--- a/test/CodeGen/BPF/cc_ret.ll
+++ b/test/CodeGen/BPF/cc_ret.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: llc < %s -march=bpfel | FileCheck %s
define void @test() #0 {
entry:
diff --git a/test/CodeGen/BPF/ex1.ll b/test/CodeGen/BPF/ex1.ll
index be038e9a3d8c7..546e5d49da69e 100644
--- a/test/CodeGen/BPF/ex1.ll
+++ b/test/CodeGen/BPF/ex1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: llc < %s -march=bpfel | FileCheck %s
%struct.bpf_context = type { i64, i64, i64, i64, i64, i64, i64 }
%struct.sk_buff = type { i64, i64, i64, i64, i64, i64, i64 }
diff --git a/test/CodeGen/BPF/intrinsics.ll b/test/CodeGen/BPF/intrinsics.ll
index 98b57deb7c8dd..483473e922fc5 100644
--- a/test/CodeGen/BPF/intrinsics.ll
+++ b/test/CodeGen/BPF/intrinsics.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -march=bpfel -show-mc-encoding | FileCheck %s
; Function Attrs: nounwind uwtable
define i32 @ld_b(i64 %foo, i64* nocapture %bar, i8* %ctx, i8* %ctx2) #0 {
diff --git a/test/CodeGen/BPF/load.ll b/test/CodeGen/BPF/load.ll
index 03fb17c965b5b..d4ba315b5f18b 100644
--- a/test/CodeGen/BPF/load.ll
+++ b/test/CodeGen/BPF/load.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: llc < %s -march=bpfel | FileCheck %s
define i16 @am1(i16* %a) nounwind {
%1 = load i16, i16* %a
diff --git a/test/CodeGen/BPF/loops.ll b/test/CodeGen/BPF/loops.ll
index 4798d78842cac..00be54b3bac57 100644
--- a/test/CodeGen/BPF/loops.ll
+++ b/test/CodeGen/BPF/loops.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: llc < %s -march=bpfel | FileCheck %s
define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
entry:
diff --git a/test/CodeGen/BPF/sanity.ll b/test/CodeGen/BPF/sanity.ll
index 09a6b65d08541..7f0ef889ff99d 100644
--- a/test/CodeGen/BPF/sanity.ll
+++ b/test/CodeGen/BPF/sanity.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: llc < %s -march=bpfel | FileCheck %s
@foo_printf.fmt = private unnamed_addr constant [9 x i8] c"hello \0A\00", align 1
diff --git a/test/CodeGen/BPF/setcc.ll b/test/CodeGen/BPF/setcc.ll
index eabb6c9bf2d6c..f6c6db6c6836e 100644
--- a/test/CodeGen/BPF/setcc.ll
+++ b/test/CodeGen/BPF/setcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=bpf < %s | FileCheck %s
+; RUN: llc -march=bpfel < %s | FileCheck %s
define i16 @sccweqand(i16 %a, i16 %b) nounwind {
%t1 = and i16 %a, %b
diff --git a/test/CodeGen/BPF/shifts.ll b/test/CodeGen/BPF/shifts.ll
index 898ae2d46123d..cb000b92fcd92 100644
--- a/test/CodeGen/BPF/shifts.ll
+++ b/test/CodeGen/BPF/shifts.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
-; test little endian only for now
+; RUN: llc < %s -march=bpfel -show-mc-encoding | FileCheck %s
define zeroext i8 @lshr8(i8 zeroext %a, i8 zeroext %cnt) nounwind readnone {
entry:
diff --git a/test/CodeGen/BPF/sockex2.ll b/test/CodeGen/BPF/sockex2.ll
index 6ae5e1c8d6bf4..d372a5982f686 100644
--- a/test/CodeGen/BPF/sockex2.ll
+++ b/test/CodeGen/BPF/sockex2.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
-; test little endian only for now
+; RUN: llc < %s -march=bpfel -show-mc-encoding | FileCheck %s
%struct.bpf_map_def = type { i32, i32, i32, i32 }
%struct.sk_buff = type opaque
diff --git a/test/CodeGen/Generic/stop-after.ll b/test/CodeGen/Generic/stop-after.ll
index 557e097840af0..791378c3737d2 100644
--- a/test/CodeGen/Generic/stop-after.ll
+++ b/test/CodeGen/Generic/stop-after.ll
@@ -1,9 +1,10 @@
; RUN: llc < %s -debug-pass=Structure -stop-after=loop-reduce -o /dev/null 2>&1 | FileCheck %s -check-prefix=STOP
; RUN: llc < %s -debug-pass=Structure -start-after=loop-reduce -o /dev/null 2>&1 | FileCheck %s -check-prefix=START
-; STOP: -loop-reduce -print-module
+; STOP: -loop-reduce
; STOP: Loop Strength Reduction
; STOP-NEXT: Machine Function Analysis
+; STOP-NEXT: MIR Printing Pass
; START: -machine-branch-prob -gc-lowering
; START: FunctionPass Manager
diff --git a/test/CodeGen/Hexagon/args.ll b/test/CodeGen/Hexagon/args.ll
index aea4ffe2eee52..1c470f68aa272 100644
--- a/test/CodeGen/Hexagon/args.ll
+++ b/test/CodeGen/Hexagon/args.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-dfa-sched -disable-hexagon-misched < %s | FileCheck %s
-; CHECK: memw(r29{{ *}}+{{ *}}#0){{ *}}={{ *}}#7
-; CHECK: r1:0 = combine(#2, #1)
-; CHECK: r3:2 = combine(#4, #3)
+; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK: r5:4 = combine(#6, #5)
+; CHECK: r3:2 = combine(#4, #3)
+; CHECK: r1:0 = combine(#2, #1)
+; CHECK: memw(r29{{ *}}+{{ *}}#0){{ *}}={{ *}}#7
define void @foo() nounwind {
diff --git a/test/CodeGen/Hexagon/calling-conv.ll b/test/CodeGen/Hexagon/calling-conv.ll
deleted file mode 100644
index 7133c1ae7aad9..0000000000000
--- a/test/CodeGen/Hexagon/calling-conv.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \
-; RUN: FileCheck %s --check-prefix=CHECK-ONE
-; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \
-; RUN: FileCheck %s --check-prefix=CHECK-TWO
-; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \
-; RUN: FileCheck %s --check-prefix=CHECK-THREE
-
-%struct.test_struct = type { i32, i8, i64 }
-%struct.test_struct_long = type { i8, i64 }
-
-@mystruct = external global %struct.test_struct*, align 4
-
-; CHECK-ONE: memw(r29+#48) = r2
-; CHECK-TWO: memw(r29+#52) = r2
-; CHECK-THREE: memw(r29+#56) = r2
-; Function Attrs: nounwind
-define void @foo(%struct.test_struct* noalias sret %agg.result, i32 %a, i8 zeroext %c, %struct.test_struct* byval %s, %struct.test_struct_long* byval %t) #0 {
-entry:
- %a.addr = alloca i32, align 4
- %c.addr = alloca i8, align 1
- %z = alloca i32, align 4
- %ret = alloca %struct.test_struct, align 8
- store i32 %a, i32* %a.addr, align 4
- store i8 %c, i8* %c.addr, align 1
- %0 = bitcast i32* %z to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #1
- store i32 45, i32* %z, align 4
- %1 = bitcast %struct.test_struct* %ret to i8*
- call void @llvm.lifetime.start(i64 16, i8* %1) #1
- %2 = load i32, i32* %z, align 4
- %3 = load %struct.test_struct*, %struct.test_struct** @mystruct, align 4
- %4 = load %struct.test_struct*, %struct.test_struct** @mystruct, align 4
- %5 = load i8, i8* %c.addr, align 1
- %6 = load i32, i32* %a.addr, align 4
- %conv = sext i32 %6 to i64
- %add = add nsw i64 %conv, 1
- %7 = load i32, i32* %a.addr, align 4
- %add1 = add nsw i32 %7, 2
- %8 = load i32, i32* %a.addr, align 4
- %conv2 = sext i32 %8 to i64
- %add3 = add nsw i64 %conv2, 3
- %9 = load i8, i8* %c.addr, align 1
- %10 = load i8, i8* %c.addr, align 1
- %11 = load i8, i8* %c.addr, align 1
- %12 = load i32, i32* %z, align 4
- call void @bar(%struct.test_struct* sret %ret, i32 %2, %struct.test_struct* byval %3, %struct.test_struct* byval %4, i8 zeroext %5, i64 %add, i32 %add1, i64 %add3, i8 zeroext %9, i8 zeroext %10, i8 zeroext %11, i32 %12)
- %x = getelementptr inbounds %struct.test_struct, %struct.test_struct* %ret, i32 0, i32 0
- store i32 20, i32* %x, align 4
- %13 = bitcast %struct.test_struct* %agg.result to i8*
- %14 = bitcast %struct.test_struct* %ret to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %13, i8* %14, i32 16, i32 8, i1 false)
- %15 = bitcast %struct.test_struct* %ret to i8*
- call void @llvm.lifetime.end(i64 16, i8* %15) #1
- %16 = bitcast i32* %z to i8*
- call void @llvm.lifetime.end(i64 4, i8* %16) #1
- ret void
-}
-
-; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-
-declare void @bar(%struct.test_struct* sret, i32, %struct.test_struct* byval, %struct.test_struct* byval, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32) #2
-
-; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
-
-; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
-
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv4" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv4" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
diff --git a/test/CodeGen/Hexagon/cext-valid-packet1.ll b/test/CodeGen/Hexagon/cext-valid-packet1.ll
index a479d37e4ae51..35e7b364b5089 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet1.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet1.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; XFAIL:
; Check that the packetizer generates valid packets with constant
; extended instructions.
diff --git a/test/CodeGen/Hexagon/cext-valid-packet2.ll b/test/CodeGen/Hexagon/cext-valid-packet2.ll
index 2eba743299605..c3a4915ec2e08 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet2.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet2.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; XFAIL:
; Check that the packetizer generates valid packets with constant
; extended add and base+offset store instructions.
diff --git a/test/CodeGen/Hexagon/compound.ll b/test/CodeGen/Hexagon/compound.ll
new file mode 100644
index 0000000000000..f8d36b8b77d90
--- /dev/null
+++ b/test/CodeGen/Hexagon/compound.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
+
+; CHECK: p0 = cmp.gt(r0,#-1); if (!p0.new) jump:nt
+
+declare void @a()
+declare void @b()
+
+define void @foo(i32 %a) {
+%b = icmp sgt i32 %a, -1
+br i1 %b, label %x, label %y
+x:
+call void @a()
+ret void
+y:
+call void @b()
+ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/Hexagon/dualstore.ll b/test/CodeGen/Hexagon/dualstore.ll
index 33d9ce9b93514..9f4569d6459c7 100644
--- a/test/CodeGen/Hexagon/dualstore.ll
+++ b/test/CodeGen/Hexagon/dualstore.ll
@@ -1,12 +1,11 @@
-; RUN: llc -march=hexagon -disable-hexagon-misched < %s | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj %s -o - | llvm-objdump -d - | FileCheck %s
; Check that we generate dual stores in one packet in V4
-; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}=
-; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}=
+; CHECK: 00 40 9f 52 529f4000
+; CHECK: 10 10 00 f0 f0001010
-define i32 @main(i32 %v, i32* %p1, i32* %p2) nounwind {
-entry:
- store i32 %v, i32* %p1, align 4
- store i32 %v, i32* %p2, align 4
- ret i32 0
+define void @foo(i32* %a, i32* %b) {
+ store i32 0, i32* %a
+ store i32 0, i32* %b
+ ret void
}
diff --git a/test/CodeGen/Hexagon/duplex.ll b/test/CodeGen/Hexagon/duplex.ll
new file mode 100644
index 0000000000000..80fe61ceccca3
--- /dev/null
+++ b/test/CodeGen/Hexagon/duplex.ll
@@ -0,0 +1,7 @@
+; RUN: llc -march=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
+
+; CHECK: c0 3f 00 48 48003fc0
+
+define i32 @foo() {
+ret i32 0
+}
\ No newline at end of file
diff --git a/test/CodeGen/Hexagon/relax.ll b/test/CodeGen/Hexagon/relax.ll
new file mode 100644
index 0000000000000..9823d4d1cd9c9
--- /dev/null
+++ b/test/CodeGen/Hexagon/relax.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon -filetype=obj < %s | llvm-objdump -d -r - | FileCheck %s
+
+declare void @bar()
+
+define void @foo() {
+call void @bar()
+ret void
+}
+
+
+; CHECK: { allocframe(#0) }
+; CHECK: { call 0 }
+; CHECK: 00000004: R_HEX_B22_PCREL
+; CHECK: { dealloc_return }
\ No newline at end of file
diff --git a/test/CodeGen/Hexagon/sube.ll b/test/CodeGen/Hexagon/sube.ll
index 873f52b2d5df3..9735894c419e2 100644
--- a/test/CodeGen/Hexagon/sube.ll
+++ b/test/CodeGen/Hexagon/sube.ll
@@ -3,10 +3,10 @@
; CHECK: r{{[0-9]+:[0-9]+}} = #1
; CHECK: r{{[0-9]+:[0-9]+}} = #0
; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
define void @check_sube_subc(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
diff --git a/test/CodeGen/MIR/lit.local.cfg b/test/CodeGen/MIR/lit.local.cfg
new file mode 100644
index 0000000000000..e69aa5765356d
--- /dev/null
+++ b/test/CodeGen/MIR/lit.local.cfg
@@ -0,0 +1,2 @@
+config.suffixes = ['.mir']
+
diff --git a/test/CodeGen/MIR/llvm-ir-error-reported.mir b/test/CodeGen/MIR/llvm-ir-error-reported.mir
new file mode 100644
index 0000000000000..013b28cd78900
--- /dev/null
+++ b/test/CodeGen/MIR/llvm-ir-error-reported.mir
@@ -0,0 +1,22 @@
+# RUN: not llc -start-after branch-folder -stop-after branch-folder -o /dev/null %s 2>&1 | FileCheck %s
+# This test ensures an error is reported if the embedded LLVM IR contains an
+# error.
+
+--- |
+
+ ; CHECK: [[@LINE+3]]:15: error: use of undefined value '%a'
+ define i32 @foo(i32 %x, i32 %y) {
+ %z = alloca i32, align 4
+ store i32 %a, i32* %z, align 4
+ br label %Test
+ Test:
+ %m = load i32, i32* %z, align 4
+ %cond = icmp eq i32 %y, %m
+ br i1 %cond, label %IfEqual, label %IfUnequal
+ IfEqual:
+ ret i32 1
+ IfUnequal:
+ ret i32 0
+ }
+
+...
diff --git a/test/CodeGen/MIR/llvmIR.mir b/test/CodeGen/MIR/llvmIR.mir
new file mode 100644
index 0000000000000..7a7b46b626386
--- /dev/null
+++ b/test/CodeGen/MIR/llvmIR.mir
@@ -0,0 +1,32 @@
+# RUN: llc -start-after branch-folder -stop-after branch-folder -o /dev/null %s | FileCheck %s
+# This test ensures that the LLVM IR embedded in a MIR file is parsed
+# correctly.
+
+--- |
+ ; CHECK: define i32 @foo(i32 %x, i32 %y)
+ ; CHECK: %z = alloca i32, align 4
+ ; CHECK: store i32 %x, i32* %z, align 4
+ ; CHECK: br label %Test
+ ; CHECK: Test:
+ ; CHECK: %m = load i32, i32* %z, align 4
+ ; CHECK: %cond = icmp eq i32 %y, %m
+ ; CHECK: br i1 %cond, label %IfEqual, label %IfUnequal
+ ; CHECK: IfEqual:
+ ; CHECK: ret i32 1
+ ; CHECK: IfUnequal:
+ ; CHECK: ret i32 0
+ define i32 @foo(i32 %x, i32 %y) {
+ %z = alloca i32, align 4
+ store i32 %x, i32* %z, align 4
+ br label %Test
+ Test:
+ %m = load i32, i32* %z, align 4
+ %cond = icmp eq i32 %y, %m
+ br i1 %cond, label %IfEqual, label %IfUnequal
+ IfEqual:
+ ret i32 1
+ IfUnequal:
+ ret i32 0
+ }
+
+...
diff --git a/test/CodeGen/MIR/llvmIRMissing.mir b/test/CodeGen/MIR/llvmIRMissing.mir
new file mode 100644
index 0000000000000..2acbcd1f9884d
--- /dev/null
+++ b/test/CodeGen/MIR/llvmIRMissing.mir
@@ -0,0 +1,5 @@
+# RUN: llc -start-after branch-folder -stop-after branch-folder -o /dev/null %s
+# This test ensures that the MIR parser accepts files without embedded LLVM IR.
+
+---
+...
diff --git a/test/CodeGen/MIR/machine-function-missing-name.mir b/test/CodeGen/MIR/machine-function-missing-name.mir
new file mode 100644
index 0000000000000..54668f1a5efe3
--- /dev/null
+++ b/test/CodeGen/MIR/machine-function-missing-name.mir
@@ -0,0 +1,22 @@
+# RUN: not llc -start-after branch-folder -stop-after branch-folder -o /dev/null %s 2>&1 | FileCheck %s
+# This test ensures that an error is reported when a machine function doesn't
+# have a name attribute.
+
+--- |
+
+ define i32 @foo() {
+ ret i32 0
+ }
+
+ define i32 @bar() {
+ ret i32 0
+ }
+
+...
+---
+# CHECK: [[@LINE+1]]:1: error: missing required key 'name'
+nme: foo
+...
+---
+name: bar
+...
diff --git a/test/CodeGen/MIR/machine-function.mir b/test/CodeGen/MIR/machine-function.mir
new file mode 100644
index 0000000000000..679bfd2d16209
--- /dev/null
+++ b/test/CodeGen/MIR/machine-function.mir
@@ -0,0 +1,24 @@
+# RUN: llc -start-after branch-folder -stop-after branch-folder -o /dev/null %s | FileCheck %s
+# This test ensures that the MIR parser parses machine functions correctly.
+
+--- |
+
+ define i32 @foo() {
+ ret i32 0
+ }
+
+ define i32 @bar() {
+ ret i32 0
+ }
+
+...
+---
+# CHECK: name: foo
+# CHECK-NEXT: ...
+name: foo
+...
+---
+# CHECK: name: bar
+# CHECK-NEXT: ...
+name: bar
+...
diff --git a/test/CodeGen/Mips/Fast-ISel/bswap1.ll b/test/CodeGen/Mips/Fast-ISel/bswap1.ll
new file mode 100644
index 0000000000000..8ac9753fa463f
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/bswap1.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=32R1
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=32R2
+
+@a = global i16 -21829, align 2
+@b = global i32 -1430532899, align 4
+@a1 = common global i16 0, align 2
+@b1 = common global i32 0, align 4
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+
+define void @b16() {
+ ; ALL-LABEL: b16:
+
+ ; ALL: lw $[[A_ADDR:[0-9]+]], %got(a)($[[GOT_ADDR:[0-9]+]])
+ ; ALL: lhu $[[A_VAL:[0-9]+]], 0($[[A_ADDR]])
+
+ ; 32R1: sll $[[TMP1:[0-9]+]], $[[A_VAL]], 8
+ ; 32R1: srl $[[TMP2:[0-9]+]], $[[A_VAL]], 8
+ ; 32R1: or $[[TMP3:[0-9]+]], $[[TMP1]], $[[TMP2]]
+ ; 32R1: andi $[[TMP4:[0-9]+]], $[[TMP3]], 65535
+
+ ; 32R2: wsbh $[[RESULT:[0-9]+]], $[[A_VAL]]
+
+ %1 = load i16, i16* @a, align 2
+ %2 = call i16 @llvm.bswap.i16(i16 %1)
+ store i16 %2, i16* @a1, align 2
+ ret void
+}
+
+define void @b32() {
+ ; ALL-LABEL: b32:
+
+ ; ALL: lw $[[B_ADDR:[0-9]+]], %got(b)($[[GOT_ADDR:[0-9]+]])
+ ; ALL: lw $[[B_VAL:[0-9]+]], 0($[[B_ADDR]])
+
+ ; 32R1: srl $[[TMP1:[0-9]+]], $[[B_VAL]], 8
+ ; 32R1: srl $[[TMP2:[0-9]+]], $[[B_VAL]], 24
+ ; 32R1: andi $[[TMP3:[0-9]+]], $[[TMP1]], 65280
+ ; 32R1: or $[[TMP4:[0-9]+]], $[[TMP2]], $[[TMP3]]
+ ; 32R1: andi $[[TMP5:[0-9]+]], $[[B_VAL]], 65280
+ ; 32R1: sll $[[TMP6:[0-9]+]], $[[TMP5]], 8
+ ; 32R1: sll $[[TMP7:[0-9]+]], $[[B_VAL]], 24
+ ; 32R1: or $[[TMP8:[0-9]+]], $[[TMP4]], $[[TMP6]]
+ ; 32R1: or $[[RESULT:[0-9]+]], $[[TMP7]], $[[TMP8]]
+
+ ; 32R2: wsbh $[[TMP:[0-9]+]], $[[B_VAL]]
+ ; 32R2: rotr $[[RESULT:[0-9]+]], $[[TMP]], 16
+
+ %1 = load i32, i32* @b, align 4
+ %2 = call i32 @llvm.bswap.i32(i32 %1)
+ store i32 %2, i32* @b1, align 4
+ ret void
+}
diff --git a/test/CodeGen/Mips/Fast-ISel/div1.ll b/test/CodeGen/Mips/Fast-ISel/div1.ll
new file mode 100644
index 0000000000000..89e7f211251f6
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/div1.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s
+
+@sj = global i32 200000, align 4
+@sk = global i32 -47, align 4
+@uj = global i32 200000, align 4
+@uk = global i32 43, align 4
+@si = common global i32 0, align 4
+@ui = common global i32 0, align 4
+
+define void @divs() {
+ ; CHECK-LABEL: divs:
+
+ ; CHECK: lui $[[GOT1:[0-9]+]], %hi(_gp_disp)
+ ; CHECK: addiu $[[GOT2:[0-9]+]], $[[GOT1]], %lo(_gp_disp)
+ ; CHECK: addu $[[GOT:[0-9]+]], $[[GOT2:[0-9]+]], $25
+ ; CHECK-DAG: lw $[[I_ADDR:[0-9]+]], %got(si)($[[GOT]])
+ ; CHECK-DAG: lw $[[K_ADDR:[0-9]+]], %got(sk)($[[GOT]])
+ ; CHECK-DAG: lw $[[J_ADDR:[0-9]+]], %got(sj)($[[GOT]])
+ ; CHECK-DAG: lw $[[J:[0-9]+]], 0($[[J_ADDR]])
+ ; CHECK-DAG: lw $[[K:[0-9]+]], 0($[[K_ADDR]])
+ ; CHECK-DAG: div $zero, $[[J]], $[[K]]
+  ; CHECK-DAG:      teq $[[K]], $zero, 7
+ ; CHECK-DAG: mflo $[[RESULT:[0-9]+]]
+ ; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
+ %1 = load i32, i32* @sj, align 4
+ %2 = load i32, i32* @sk, align 4
+ %div = sdiv i32 %1, %2
+ store i32 %div, i32* @si, align 4
+ ret void
+}
+
+define void @divu() {
+ ; CHECK-LABEL: divu:
+
+ ; CHECK: lui $[[GOT1:[0-9]+]], %hi(_gp_disp)
+ ; CHECK: addiu $[[GOT2:[0-9]+]], $[[GOT1]], %lo(_gp_disp)
+ ; CHECK: addu $[[GOT:[0-9]+]], $[[GOT2:[0-9]+]], $25
+ ; CHECK-DAG: lw $[[I_ADDR:[0-9]+]], %got(ui)($[[GOT]])
+ ; CHECK-DAG: lw $[[K_ADDR:[0-9]+]], %got(uk)($[[GOT]])
+ ; CHECK-DAG: lw $[[J_ADDR:[0-9]+]], %got(uj)($[[GOT]])
+ ; CHECK-DAG: lw $[[J:[0-9]+]], 0($[[J_ADDR]])
+ ; CHECK-DAG: lw $[[K:[0-9]+]], 0($[[K_ADDR]])
+ ; CHECK-DAG: divu $zero, $[[J]], $[[K]]
+  ; CHECK-DAG:      teq $[[K]], $zero, 7
+ ; CHECK-DAG: mflo $[[RESULT:[0-9]+]]
+ ; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
+ %1 = load i32, i32* @uj, align 4
+ %2 = load i32, i32* @uk, align 4
+ %div = udiv i32 %1, %2
+ store i32 %div, i32* @ui, align 4
+ ret void
+}
diff --git a/test/CodeGen/Mips/Fast-ISel/memtest1.ll b/test/CodeGen/Mips/Fast-ISel/memtest1.ll
new file mode 100644
index 0000000000000..a3fc4a32981c0
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/memtest1.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=32R1
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=32R2
+
+@str = private unnamed_addr constant [12 x i8] c"hello there\00", align 1
+@src = global i8* getelementptr inbounds ([12 x i8], [12 x i8]* @str, i32 0, i32 0), align 4
+@i = global i32 12, align 4
+@dest = common global [50 x i8] zeroinitializer, align 1
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1)
+
+define void @cpy(i8* %src, i32 %i) {
+ ; ALL-LABEL: cpy:
+
+ ; ALL-DAG: lw $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
+ ; ALL-DAG: sw $4, 24($sp)
+ ; ALL-DAG: move $4, $[[T0]]
+ ; ALL-DAG: sw $5, 20($sp)
+ ; ALL-DAG: lw $[[T1:[0-9]+]], 24($sp)
+ ; ALL-DAG: move $5, $[[T1]]
+ ; ALL-DAG: lw $6, 20($sp)
+ ; ALL-DAG: lw $[[T2:[0-9]+]], %got(memcpy)(${{[0-9]+}})
+ ; ALL: jalr $[[T2]]
+ ; ALL-NEXT: nop
+ ; ALL-NOT: {{.*}}$2{{.*}}
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0),
+ i8* %src, i32 %i, i32 1, i1 false)
+ ret void
+}
+
+define void @mov(i8* %src, i32 %i) {
+ ; ALL-LABEL: mov:
+
+
+ ; ALL-DAG: lw $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
+ ; ALL-DAG: sw $4, 24($sp)
+ ; ALL-DAG: move $4, $[[T0]]
+ ; ALL-DAG: sw $5, 20($sp)
+ ; ALL-DAG: lw $[[T1:[0-9]+]], 24($sp)
+ ; ALL-DAG: move $5, $[[T1]]
+ ; ALL-DAG: lw $6, 20($sp)
+ ; ALL-DAG: lw $[[T2:[0-9]+]], %got(memmove)(${{[0-9]+}})
+ ; ALL: jalr $[[T2]]
+ ; ALL-NEXT: nop
+ ; ALL-NOT: {{.*}}$2{{.*}}
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0),
+ i8* %src, i32 %i, i32 1, i1 false)
+ ret void
+}
+
+define void @clear(i32 %i) {
+ ; ALL-LABEL: clear:
+
+ ; ALL-DAG: lw $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
+ ; ALL-DAG: sw $4, 16($sp)
+ ; ALL-DAG: move $4, $[[T0]]
+ ; ALL-DAG: addiu $[[T1:[0-9]+]], $zero, 42
+ ; 32R1-DAG: sll $[[T2:[0-9]+]], $[[T1]], 24
+ ; 32R1-DAG: sra $5, $[[T2]], 24
+ ; 32R2-DAG: seb $5, $[[T1]]
+ ; ALL-DAG: lw $6, 16($sp)
+ ; ALL-DAG: lw $[[T2:[0-9]+]], %got(memset)(${{[0-9]+}})
+ ; ALL: jalr $[[T2]]
+ ; ALL-NEXT: nop
+ ; ALL-NOT: {{.*}}$2{{.*}}
+ call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0),
+ i8 42, i32 %i, i32 1, i1 false)
+ ret void
+}
diff --git a/test/CodeGen/Mips/Fast-ISel/mul1.ll b/test/CodeGen/Mips/Fast-ISel/mul1.ll
new file mode 100644
index 0000000000000..0ee044bea0a78
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/mul1.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -O0 \
+; RUN: -fast-isel -mips-fast-isel -relocation-model=pic
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O0 \
+; RUN: -fast-isel -mips-fast-isel -relocation-model=pic
+
+; This test just makes sure that registers can be allocated for this
+; example; there was an issue with allocating AC0 after a mul
+; instruction.
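+; (AC0 refers to the Mips hi/lo accumulator register pair that mul defines.)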
+
+declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
+
+define i32 @foo(i32 %a, i32 %b) {
+entry:
+ %0 = mul i32 %a, %b
+ %1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %0, i32 %b)
+ %2 = extractvalue { i32, i1 } %1, 0
+ ret i32 %2
+}
diff --git a/test/CodeGen/Mips/Fast-ISel/rem1.ll b/test/CodeGen/Mips/Fast-ISel/rem1.ll
new file mode 100644
index 0000000000000..9b5e440d0eaa2
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/rem1.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O0 -relocation-model=pic \
+; RUN: -fast-isel=true -mips-fast-isel -fast-isel-abort=1 | FileCheck %s
+
+@sj = global i32 200, align 4
+@sk = global i32 -47, align 4
+@uj = global i32 200, align 4
+@uk = global i32 43, align 4
+@si = common global i32 0, align 4
+@ui = common global i32 0, align 4
+
+define void @rems() {
+ ; CHECK-LABEL: rems:
+
+ ; CHECK: lui $[[GOT1:[0-9]+]], %hi(_gp_disp)
+ ; CHECK: addiu $[[GOT2:[0-9]+]], $[[GOT1]], %lo(_gp_disp)
+ ; CHECK: addu $[[GOT:[0-9]+]], $[[GOT2:[0-9]+]], $25
+ ; CHECK-DAG: lw $[[I_ADDR:[0-9]+]], %got(si)($[[GOT]])
+ ; CHECK-DAG: lw $[[K_ADDR:[0-9]+]], %got(sk)($[[GOT]])
+ ; CHECK-DAG: lw $[[J_ADDR:[0-9]+]], %got(sj)($[[GOT]])
+ ; CHECK-DAG: lw $[[J:[0-9]+]], 0($[[J_ADDR]])
+ ; CHECK-DAG: lw $[[K:[0-9]+]], 0($[[K_ADDR]])
+ ; CHECK-DAG: div $zero, $[[J]], $[[K]]
+  ; CHECK-DAG:      teq $[[K]], $zero, 7
+ ; CHECK-DAG: mfhi $[[RESULT:[0-9]+]]
+ ; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
+ %1 = load i32, i32* @sj, align 4
+ %2 = load i32, i32* @sk, align 4
+ %rem = srem i32 %1, %2
+ store i32 %rem, i32* @si, align 4
+ ret void
+}
+
+define void @remu() {
+ ; CHECK-LABEL: remu:
+
+ ; CHECK: lui $[[GOT1:[0-9]+]], %hi(_gp_disp)
+ ; CHECK: addiu $[[GOT2:[0-9]+]], $[[GOT1]], %lo(_gp_disp)
+ ; CHECK: addu $[[GOT:[0-9]+]], $[[GOT2:[0-9]+]], $25
+ ; CHECK-DAG: lw $[[I_ADDR:[0-9]+]], %got(ui)($[[GOT]])
+ ; CHECK-DAG: lw $[[K_ADDR:[0-9]+]], %got(uk)($[[GOT]])
+ ; CHECK-DAG: lw $[[J_ADDR:[0-9]+]], %got(uj)($[[GOT]])
+ ; CHECK-DAG: lw $[[J:[0-9]+]], 0($[[J_ADDR]])
+ ; CHECK-DAG: lw $[[K:[0-9]+]], 0($[[K_ADDR]])
+ ; CHECK-DAG: divu $zero, $[[J]], $[[K]]
+  ; CHECK-DAG:      teq $[[K]], $zero, 7
+ ; CHECK-DAG: mfhi $[[RESULT:[0-9]+]]
+ ; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
+ %1 = load i32, i32* @uj, align 4
+ %2 = load i32, i32* @uk, align 4
+ %rem = urem i32 %1, %2
+ store i32 %rem, i32* @ui, align 4
+ ret void
+}
diff --git a/test/CodeGen/Mips/Fast-ISel/sel1.ll b/test/CodeGen/Mips/Fast-ISel/sel1.ll
new file mode 100644
index 0000000000000..47b6a895cde87
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/sel1.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O2 -relocation-model=pic \
+; RUN: -fast-isel -mips-fast-isel -fast-isel-abort=1 | FileCheck %s
+
+define i1 @sel_i1(i1 %j, i1 %k, i1 %l) {
+entry:
+ ; CHECK-LABEL: sel_i1:
+
+ ; FIXME: The following instruction is redundant.
+ ; CHECK: xor $[[T0:[0-9]+]], $4, $zero
+ ; CHECK-NEXT: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
+ ; CHECK-NEXT: movn $6, $5, $[[T1]]
+ ; CHECK: move $2, $6
+ %cond = icmp ne i1 %j, 0
+ %res = select i1 %cond, i1 %k, i1 %l
+ ret i1 %res
+}
+
+define i8 @sel_i8(i8 %j, i8 %k, i8 %l) {
+entry:
+ ; CHECK-LABEL: sel_i8:
+
+ ; CHECK-DAG: seb $[[T0:[0-9]+]], $4
+ ; FIXME: The following 2 instructions are redundant.
+ ; CHECK-DAG: seb $[[T1:[0-9]+]], $zero
+ ; CHECK: xor $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; CHECK-NEXT: sltu $[[T3:[0-9]+]], $zero, $[[T2]]
+ ; CHECK-NEXT: movn $6, $5, $[[T3]]
+ ; CHECK: move $2, $6
+ %cond = icmp ne i8 %j, 0
+ %res = select i1 %cond, i8 %k, i8 %l
+ ret i8 %res
+}
+
+define i16 @sel_i16(i16 %j, i16 %k, i16 %l) {
+entry:
+ ; CHECK-LABEL: sel_i16:
+
+ ; CHECK-DAG: seh $[[T0:[0-9]+]], $4
+ ; FIXME: The following 2 instructions are redundant.
+ ; CHECK-DAG: seh $[[T1:[0-9]+]], $zero
+ ; CHECK: xor $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; CHECK-NEXT: sltu $[[T3:[0-9]+]], $zero, $[[T2]]
+ ; CHECK-NEXT: movn $6, $5, $[[T3]]
+ ; CHECK: move $2, $6
+ %cond = icmp ne i16 %j, 0
+ %res = select i1 %cond, i16 %k, i16 %l
+ ret i16 %res
+}
+
+define i32 @sel_i32(i32 %j, i32 %k, i32 %l) {
+entry:
+ ; CHECK-LABEL: sel_i32:
+
+ ; FIXME: The following instruction is redundant.
+ ; CHECK: xor $[[T0:[0-9]+]], $4, $zero
+ ; CHECK-NEXT: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
+ ; CHECK-NEXT: movn $6, $5, $[[T1]]
+ ; CHECK: move $2, $6
+ %cond = icmp ne i32 %j, 0
+ %res = select i1 %cond, i32 %k, i32 %l
+ ret i32 %res
+}
+
+define float @sel_float(i32 %j, float %k, float %l) {
+entry:
+ ; CHECK-LABEL: sel_float:
+
+ ; CHECK-DAG: mtc1 $6, $f0
+ ; CHECK-DAG: mtc1 $5, $f1
+ ; CHECK-DAG: xor $[[T0:[0-9]+]], $4, $zero
+ ; CHECK: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
+ ; CHECK: movn.s $f0, $f1, $[[T1]]
+ %cond = icmp ne i32 %j, 0
+ %res = select i1 %cond, float %k, float %l
+ ret float %res
+}
+
+define double @sel_double(i32 %j, double %k, double %l) {
+entry:
+ ; CHECK-LABEL: sel_double:
+
+ ; CHECK-DAG: mtc1 $6, $f2
+ ; CHECK-DAG: mthc1 $7, $f2
+ ; CHECK-DAG: ldc1 $f0, 16($sp)
+ ; CHECK-DAG: xor $[[T0:[0-9]+]], $4, $zero
+ ; CHECK: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
+ ; CHECK: movn.d $f0, $f2, $[[T1]]
+ %cond = icmp ne i32 %j, 0
+ %res = select i1 %cond, double %k, double %l
+ ret double %res
+}
diff --git a/test/CodeGen/Mips/dynamic-stack-realignment.ll b/test/CodeGen/Mips/dynamic-stack-realignment.ll
new file mode 100644
index 0000000000000..777930a37ad50
--- /dev/null
+++ b/test/CodeGen/Mips/dynamic-stack-realignment.ll
@@ -0,0 +1,299 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips3 -target-abi n32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+; RUN: llc < %s -march=mips64 -mcpu=mips64 -target-abi n32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -target-abi n32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+
+; Check dynamic stack realignment in functions without variable-sized objects.
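+; The expected prologue saves the incoming $sp in $fp and then rounds $sp down
+; to the requested alignment by and'ing it with -align (-512 for align 512).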
+
+declare void @helper_01(i32, i32, i32, i32, i32*)
+
+; O32 ABI
+define void @func_01() {
+entry:
+; GP32-LABEL: func_01:
+
+ ; prologue
+  ; FIXME: We are currently over-allocating stack space. This particular case
+  ;        needs a frame of between 16 and 512 bytes, but currently
+  ;        allocates between 1024 and 1536 bytes.
+ ; GP32: addiu $sp, $sp, -1024
+ ; GP32: sw $ra, 1020($sp)
+ ; GP32: sw $fp, 1016($sp)
+ ;
+ ; GP32: move $fp, $sp
+ ; GP32: addiu $[[T0:[0-9]+|ra|gp]], $zero, -512
+ ; GP32-NEXT: and $sp, $sp, $[[T0]]
+
+ ; body
+ ; GP32: addiu $[[T1:[0-9]+]], $sp, 512
+ ; GP32: sw $[[T1]], 16($sp)
+
+ ; epilogue
+ ; GP32: move $sp, $fp
+ ; GP32: lw $fp, 1016($sp)
+ ; GP32: lw $ra, 1020($sp)
+ ; GP32: addiu $sp, $sp, 1024
+
+ %a = alloca i32, align 512
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+ ret void
+}
+
+declare void @helper_02(i32, i32, i32, i32,
+ i32, i32, i32, i32, i32*)
+
+; N32/N64 ABIs
+define void @func_02() {
+entry:
+; GP64-LABEL: func_02:
+
+ ; prologue
+  ; FIXME: We are currently over-allocating stack space. This particular case
+  ;        needs a frame of between 16 and 512 bytes, but currently
+  ;        allocates between 1024 and 1536 bytes.
+ ; N32: addiu $sp, $sp, -1024
+ ; N64: daddiu $sp, $sp, -1024
+ ; GP64: sd $ra, 1016($sp)
+ ; GP64: sd $fp, 1008($sp)
+ ; N32: sd $gp, 1000($sp)
+ ;
+ ; GP64: move $fp, $sp
+ ; N32: addiu $[[T0:[0-9]+|ra]], $zero, -512
+ ; N64: daddiu $[[T0:[0-9]+|ra]], $zero, -512
+ ; GP64-NEXT: and $sp, $sp, $[[T0]]
+
+ ; body
+ ; N32: addiu $[[T1:[0-9]+]], $sp, 512
+ ; N64: daddiu $[[T1:[0-9]+]], $sp, 512
+ ; GP64: sd $[[T1]], 0($sp)
+
+ ; epilogue
+ ; GP64: move $sp, $fp
+ ; N32: ld $gp, 1000($sp)
+ ; GP64: ld $fp, 1008($sp)
+ ; GP64: ld $ra, 1016($sp)
+ ; N32: addiu $sp, $sp, 1024
+ ; N64: daddiu $sp, $sp, 1024
+
+ %a = alloca i32, align 512
+ call void @helper_02(i32 0, i32 0, i32 0, i32 0,
+ i32 0, i32 0, i32 0, i32 0, i32* %a)
+ ret void
+}
+
+; Verify that we use $fp for referencing incoming arguments.
+
+declare void @helper_03(i32, i32, i32, i32, i32*, i32*)
+
+; O32 ABI
+define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32* %b) {
+entry:
+; GP32-LABEL: func_03:
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; GP32-DAG: addiu $[[T0:[0-9]+]], $sp, 512
+ ; GP32-DAG: sw $[[T0]], 16($sp)
+ ; GP32-DAG: lw $[[T1:[0-9]+]], 1040($fp)
+ ; GP32-DAG: sw $[[T1]], 20($sp)
+
+ %a = alloca i32, align 512
+ call void @helper_03(i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+ ret void
+}
+
+declare void @helper_04(i32, i32, i32, i32,
+ i32, i32, i32, i32, i32*, i32*)
+
+; N32/N64 ABIs
+define void @func_04(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
+ i32 %p4, i32 %p5, i32 %p6, i32 %p7,
+ i32* %b) {
+entry:
+; GP64-LABEL: func_04:
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; N32-DAG: addiu $[[T0:[0-9]+]], $sp, 512
+ ; N64-DAG: daddiu $[[T0:[0-9]+]], $sp, 512
+ ; GP64-DAG: sd $[[T0]], 0($sp)
+ ; GP64-DAG: ld $[[T1:[0-9]+]], 1024($fp)
+ ; GP64-DAG: sd $[[T1]], 8($sp)
+
+ %a = alloca i32, align 512
+ call void @helper_04(i32 0, i32 0, i32 0, i32 0,
+ i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+ ret void
+}
+
+; Check dynamic stack realignment in functions with variable-sized objects.
+
+; O32 ABI
+define void @func_05(i32 %sz) {
+entry:
+; GP32-LABEL: func_05:
+
+ ; prologue
+ ; FIXME: We are currently over-allocating stack space.
+ ; GP32: addiu $sp, $sp, -1024
+ ; GP32: sw $fp, 1020($sp)
+ ; GP32: sw $23, 1016($sp)
+ ;
+ ; GP32: move $fp, $sp
+ ; GP32: addiu $[[T0:[0-9]+|gp]], $zero, -512
+ ; GP32-NEXT: and $sp, $sp, $[[T0]]
+ ; GP32-NEXT: move $23, $sp
+
+ ; body
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP32: sw $[[T1]], 508($23)
+
+ ; epilogue
+ ; GP32: move $sp, $fp
+ ; GP32: lw $23, 1016($sp)
+ ; GP32: lw $fp, 1020($sp)
+ ; GP32: addiu $sp, $sp, 1024
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ ret void
+}
+
+; N32/N64 ABIs
+define void @func_06(i32 %sz) {
+entry:
+; GP64-LABEL: func_06:
+
+ ; prologue
+ ; FIXME: We are currently over-allocating stack space.
+ ; N32: addiu $sp, $sp, -1024
+ ; N64: daddiu $sp, $sp, -1024
+ ; GP64: sd $fp, 1016($sp)
+ ; GP64: sd $23, 1008($sp)
+ ;
+ ; GP64: move $fp, $sp
+ ; GP64: addiu $[[T0:[0-9]+|gp]], $zero, -512
+ ; GP64-NEXT: and $sp, $sp, $[[T0]]
+ ; GP64-NEXT: move $23, $sp
+
+ ; body
+ ; GP64: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP64: sw $[[T1]], 508($23)
+
+ ; epilogue
+ ; GP64: move $sp, $fp
+ ; GP64: ld $23, 1008($sp)
+ ; GP64: ld $fp, 1016($sp)
+ ; N32: addiu $sp, $sp, 1024
+ ; N64: daddiu $sp, $sp, 1024
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ ret void
+}
+
+; Verify that we use $fp for referencing incoming arguments and $sp for
+; building outbound arguments for nested function calls.
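+; (Incoming arguments sit at fixed offsets from $fp, the dynamically sized
+; area is addressed through $23, and outgoing argument slots through $sp.)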
+
+; O32 ABI
+define void @func_07(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %sz) {
+entry:
+; GP32-LABEL: func_07:
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; GP32-DAG: lw $[[T0:[0-9]+]], 1040($fp)
+ ;
+ ; GP32-DAG: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP32-DAG: sw $[[T1]], 508($23)
+ ;
+ ; GP32-DAG: sw $[[T2:[0-9]+]], 16($sp)
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a1)
+
+ ret void
+}
+
+; N32/N64 ABIs
+define void @func_08(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
+ i32 %p4, i32 %p5, i32 %p6, i32 %p7,
+ i32 %sz) {
+entry:
+; GP64-LABEL: func_08:
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; N32-DAG: lw $[[T0:[0-9]+]], 1028($fp)
+ ; N64-DAG: lwu $[[T0:[0-9]+]], 1028($fp)
+ ;
+ ; GP64-DAG: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP64-DAG: sw $[[T1]], 508($23)
+ ;
+ ; GP64-DAG: sd $[[T2:[0-9]+]], 0($sp)
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ call void @helper_02(i32 0, i32 0, i32 0, i32 0,
+ i32 0, i32 0, i32 0, i32 0, i32* %a1)
+ ret void
+}
+
+; Check that we do not perform dynamic stack realignment in the presence of
+; the "no-realign-stack" function attribute.
+define void @func_09() "no-realign-stack" {
+entry:
+; ALL-LABEL: func_09:
+
+ ; ALL-NOT: and $sp, $sp, $[[T0:[0-9]+|ra|gp]]
+
+ %a = alloca i32, align 512
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+ ret void
+}
+
+define void @func_10(i32 %sz) "no-realign-stack" {
+entry:
+; ALL-LABEL: func_10:
+
+ ; ALL-NOT: and $sp, $sp, $[[T0:[0-9]+|ra|gp]]
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ ret void
+}
diff --git a/test/CodeGen/Mips/ehframe-indirect.ll b/test/CodeGen/Mips/ehframe-indirect.ll
index f124881a472fc..dc06ef7840ff7 100644
--- a/test/CodeGen/Mips/ehframe-indirect.ll
+++ b/test/CodeGen/Mips/ehframe-indirect.ll
@@ -1,9 +1,11 @@
-; RUN: llc -mtriple=mipsel-linux-gnu < %s | FileCheck -check-prefix=ALL -check-prefix=O32 %s
-; RUN: llc -mtriple=mipsel-linux-android < %s | FileCheck -check-prefix=ALL -check-prefix=O32 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -target-abi=n32 < %s | FileCheck -check-prefix=ALL -check-prefix=N32 %s
-; RUN: llc -mtriple=mips64el-linux-android -target-abi=n32 < %s | FileCheck -check-prefix=ALL -check-prefix=N32 %s
-; RUN: llc -mtriple=mips64el-linux-gnu < %s | FileCheck -check-prefix=ALL -check-prefix=N64 %s
-; RUN: llc -mtriple=mips64el-linux-android < %s | FileCheck -check-prefix=ALL -check-prefix=N64 %s
+; RUN: llc -mtriple=mipsel-linux-gnu < %s -asm-verbose | FileCheck -check-prefix=ALL -check-prefix=O32 %s
+; RUN: llc -mtriple=mipsel-linux-android < %s -asm-verbose | FileCheck -check-prefix=ALL -check-prefix=O32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -target-abi=n32 < %s -asm-verbose | FileCheck -check-prefix=ALL -check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-android -target-abi=n32 < %s -asm-verbose | FileCheck -check-prefix=ALL -check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu < %s -asm-verbose | FileCheck -check-prefix=ALL -check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-android < %s -asm-verbose | FileCheck -check-prefix=ALL -check-prefix=N64 %s
+
+@_ZTISt9exception = external constant i8*
define i32 @main() {
; ALL: .cfi_startproc
@@ -16,7 +18,9 @@ entry:
lpad:
%0 = landingpad { i8*, i32 } personality i8*
- bitcast (i32 (...)* @__gxx_personality_v0 to i8*) catch i8* null
+ bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ catch i8* bitcast (i8** @_ZTISt9exception to i8*)
ret i32 0
cont:
@@ -28,6 +32,14 @@ declare i32 @__gxx_personality_v0(...)
declare void @foo()
+; ALL: GCC_except_table{{[0-9]+}}:
+; ALL: .byte 155 # @TType Encoding = indirect pcrel sdata4
+; ALL: $[[PC_LABEL:tmp[0-9]+]]:
+; ALL: .4byte ($_ZTISt9exception.DW.stub)-($[[PC_LABEL]])
+; ALL: $_ZTISt9exception.DW.stub:
+; O32: .4byte _ZTISt9exception
+; N32: .4byte _ZTISt9exception
+; N64: .8byte _ZTISt9exception
; ALL: .hidden DW.ref.__gxx_personality_v0
; ALL: .weak DW.ref.__gxx_personality_v0
; ALL: .section .data.DW.ref.__gxx_personality_v0,"aGw",@progbits,DW.ref.__gxx_personality_v0,comdat
diff --git a/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll b/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
index 3dc1cde77095e..779620e10128a 100644
--- a/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
+++ b/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
@@ -1,8 +1,8 @@
; Check that register scavenging spill slot is close to $fp.
; RUN: llc -march=mipsel -O0 < %s | FileCheck %s
-; CHECK: sw ${{.*}}, 4($fp)
-; CHECK: lw ${{.*}}, 4($fp)
+; CHECK: sw ${{.*}}, 4($sp)
+; CHECK: lw ${{.*}}, 4($sp)
define i32 @main(i32 signext %argc, i8** %argv) "no-frame-pointer-elim"="true" {
entry:
diff --git a/test/CodeGen/NVPTX/access-non-generic.ll b/test/CodeGen/NVPTX/access-non-generic.ll
index e709302918f52..5deefe881e3fb 100644
--- a/test/CodeGen/NVPTX/access-non-generic.ll
+++ b/test/CodeGen/NVPTX/access-non-generic.ll
@@ -85,6 +85,22 @@ define i32 @ld_int_from_float() {
ret i32 %1
}
+define i32 @ld_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) {
+; IR-LABEL: @ld_int_from_global_float(
+; PTX-LABEL: ld_int_from_global_float(
+ %1 = addrspacecast float addrspace(1)* %input to float*
+ %2 = getelementptr float, float* %1, i32 %i
+; IR-NEXT: getelementptr float, float addrspace(1)* %input, i32 %i
+ %3 = getelementptr float, float* %2, i32 %j
+; IR-NEXT: getelementptr float, float addrspace(1)* {{%[^,]+}}, i32 %j
+ %4 = bitcast float* %3 to i32*
+; IR-NEXT: bitcast float addrspace(1)* {{%[^ ]+}} to i32 addrspace(1)*
+ %5 = load i32, i32* %4
+; IR-NEXT: load i32, i32 addrspace(1)* {{%.+}}
+; PTX-LABEL: ld.global
+ ret i32 %5
+}
+
declare void @llvm.cuda.syncthreads() #3
attributes #3 = { noduplicate nounwind }
diff --git a/test/CodeGen/NVPTX/bug21465.ll b/test/CodeGen/NVPTX/bug21465.ll
index 76af386c6516d..2eae41f73a0c3 100644
--- a/test/CodeGen/NVPTX/bug21465.ll
+++ b/test/CodeGen/NVPTX/bug21465.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -nvptx-lower-struct-args -S | FileCheck %s
+; RUN: opt < %s -nvptx-lower-kernel-args -S | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix PTX
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
target triple = "nvptx64-unknown-unknown"
@@ -8,12 +9,15 @@ target triple = "nvptx64-unknown-unknown"
; Function Attrs: nounwind
define void @_Z11TakesStruct1SPi(%struct.S* byval nocapture readonly %input, i32* nocapture %output) #0 {
entry:
-; CHECK-LABEL @_Z22TakesStruct1SPi
-; CHECK: bitcast %struct.S* %input to i8*
-; CHECK: call i8 addrspace(101)* @llvm.nvvm.ptr.gen.to.param.p101i8.p0i8
+; CHECK-LABEL: @_Z11TakesStruct1SPi
+; PTX-LABEL: .visible .entry _Z11TakesStruct1SPi(
+; CHECK: addrspacecast %struct.S* %input to %struct.S addrspace(101)*
%b = getelementptr inbounds %struct.S, %struct.S* %input, i64 0, i32 1
%0 = load i32, i32* %b, align 4
+; PTX-NOT: ld.param.u32 {{%r[0-9]+}}, [{{%rd[0-9]+}}]
+; PTX: ld.param.u32 [[value:%r[0-9]+]], [{{%rd[0-9]+}}+4]
store i32 %0, i32* %output, align 4
+; PTX-NEXT: st.global.u32 [{{%rd[0-9]+}}], [[value]]
ret void
}
diff --git a/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
index 58b1911299175..c70670da13d60 100644
--- a/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
+++ b/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -24,7 +24,10 @@ entry:
; CHECK: cvta.local.u64 %SP, %rd[[BUF_REG]]
; CHECK: ld.param.u64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
-; CHECK: ld.f32 %f[[A0_REG:[0-9]+]], [%rd[[A_REG]]]
+; CHECK: cvta.to.global.u64 %rd[[A1_REG:[0-9]+]], %rd[[A_REG]]
+; FIXME: casting A1_REG to A2_REG is unnecessary; A2_REG is essentially A_REG
+; CHECK: cvta.global.u64 %rd[[A2_REG:[0-9]+]], %rd[[A1_REG]]
+; CHECK: ld.global.f32 %f[[A0_REG:[0-9]+]], [%rd[[A1_REG]]]
; CHECK: st.f32 [%SP+0], %f[[A0_REG]]
%0 = load float, float* %a, align 4
@@ -48,7 +51,7 @@ entry:
; CHECK: add.u64 %rd[[SP_REG:[0-9]+]], %SP, 0
; CHECK: .param .b64 param0;
-; CHECK-NEXT: st.param.b64 [param0+0], %rd[[A_REG]]
+; CHECK-NEXT: st.param.b64 [param0+0], %rd[[A2_REG]]
; CHECK-NEXT: .param .b64 param1;
; CHECK-NEXT: st.param.b64 [param1+0], %rd[[SP_REG]]
; CHECK-NEXT: call.uni
diff --git a/test/CodeGen/NVPTX/globals_init.ll b/test/CodeGen/NVPTX/globals_init.ll
new file mode 100644
index 0000000000000..5b45f410156ce
--- /dev/null
+++ b/test/CodeGen/NVPTX/globals_init.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Make sure global constant initializers are not prone to host endianness
+; issues.
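+; For example, i16 43981 is 0xABCD, so its bytes must be emitted as
+; {205, 171} regardless of the endianness of the host that ran llc.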
+
+; CHECK-DAG: .b8 Gbli08[2] = {171, 205};
+@Gbli08 = global [2 x i8] [i8 171, i8 205]
+
+; CHECK-DAG: .b8 Gbli16[4] = {205, 171, 1, 239};
+@Gbli16 = global [2 x i16] [i16 43981, i16 61185]
+
+; CHECK-DAG: .b8 Gbli32[8] = {1, 239, 205, 171, 137, 103, 69, 35};
+@Gbli32 = global [2 x i32] [i32 2882400001, i32 591751049]
+
+; CHECK-DAG: .b8 Gbli64[16] = {137, 103, 69, 35, 1, 239, 205, 171, 239, 205, 171, 137, 103, 69, 35, 1};
+@Gbli64 = global [2 x i64] [i64 12379813738877118345, i64 81985529216486895]
+
+; CHECK-DAG: .b8 Gblf32[8] = {192, 225, 100, 75, 0, 96, 106, 69};
+@Gblf32 = global [2 x float] [float 1.5e+7, float 3.75e+3]
+
+; CHECK-DAG: .b8 Gblf64[16] = {116, 10, 181, 48, 134, 62, 230, 58, 106, 222, 138, 98, 204, 250, 200, 75};
+@Gblf64 = global [2 x double] [double 5.75e-25, double 12.25e+56]
+
diff --git a/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll b/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll
new file mode 100644
index 0000000000000..53220bd905bda
--- /dev/null
+++ b/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+; Verify that both %input and %output are converted to global pointers and then
+; addrspacecast'ed back to the original type.
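+; Roughly (names here are illustrative, not what the pass emits):
+;   %input.global = addrspacecast float* %input to float addrspace(1)*
+;   %input.gen = addrspacecast float addrspace(1)* %input.global to float*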
+define void @kernel(float* %input, float* %output) {
+; CHECK-LABEL: .visible .entry kernel(
+; CHECK: cvta.to.global.u64
+; CHECK: cvta.to.global.u64
+ %1 = load float, float* %input, align 4
+; CHECK: ld.global.f32
+ store float %1, float* %output, align 4
+; CHECK: st.global.f32
+ ret void
+}
+
+!nvvm.annotations = !{!0}
+!0 = !{void (float*, float*)* @kernel, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/pr13291-i1-store.ll b/test/CodeGen/NVPTX/pr13291-i1-store.ll
index d4f7c3bd210a8..934df30a3a7dc 100644
--- a/test/CodeGen/NVPTX/pr13291-i1-store.ll
+++ b/test/CodeGen/NVPTX/pr13291-i1-store.ll
@@ -3,19 +3,19 @@
define ptx_kernel void @t1(i1* %a) {
; PTX32: mov.u16 %rs{{[0-9]+}}, 0;
-; PTX32-NEXT: st.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}};
+; PTX32-NEXT: st.global.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}};
; PTX64: mov.u16 %rs{{[0-9]+}}, 0;
-; PTX64-NEXT: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}};
+; PTX64-NEXT: st.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}};
store i1 false, i1* %a
ret void
}
define ptx_kernel void @t2(i1* %a, i8* %b) {
-; PTX32: ld.u8 %rs{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.global.u8 %rs{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, 1;
; PTX32: setp.eq.b16 %p{{[0-9]+}}, %rs{{[0-9]+}}, 1;
-; PTX64: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+; PTX64: ld.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, 1;
; PTX64: setp.eq.b16 %p{{[0-9]+}}, %rs{{[0-9]+}}, 1;
diff --git a/test/CodeGen/NVPTX/surf-read-cuda.ll b/test/CodeGen/NVPTX/surf-read-cuda.ll
index ed021346c0f9d..c17c71e01d3e3 100644
--- a/test/CodeGen/NVPTX/surf-read-cuda.ll
+++ b/test/CodeGen/NVPTX/surf-read-cuda.ll
@@ -18,8 +18,8 @@ define void @foo(i64 %img, float* %red, i32 %idx) {
; SM20: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
; SM30: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
%ret = sitofp i32 %val to float
-; SM20: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
-; SM30: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+; SM20: st.global.f32 [%r{{[0-9]+}}], %f[[REDF]]
+; SM30: st.global.f32 [%r{{[0-9]+}}], %f[[REDF]]
store float %ret, float* %red
ret void
}
@@ -37,8 +37,8 @@ define void @bar(float* %red, i32 %idx) {
; SM20: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
; SM30: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
%ret = sitofp i32 %val to float
-; SM20: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
-; SM30: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+; SM20: st.global.f32 [%r{{[0-9]+}}], %f[[REDF]]
+; SM30: st.global.f32 [%r{{[0-9]+}}], %f[[REDF]]
store float %ret, float* %red
ret void
}
diff --git a/test/CodeGen/NVPTX/tex-read-cuda.ll b/test/CodeGen/NVPTX/tex-read-cuda.ll
index c5b5600de8742..d5f7c1667f17b 100644
--- a/test/CodeGen/NVPTX/tex-read-cuda.ll
+++ b/test/CodeGen/NVPTX/tex-read-cuda.ll
@@ -16,8 +16,8 @@ define void @foo(i64 %img, float* %red, i32 %idx) {
; SM30: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [%rd[[TEXREG]], {%r{{[0-9]+}}}]
%val = tail call { float, float, float, float } @llvm.nvvm.tex.unified.1d.v4f32.s32(i64 %img, i32 %idx)
%ret = extractvalue { float, float, float, float } %val, 0
-; SM20: st.f32 [%r{{[0-9]+}}], %f[[RED]]
-; SM30: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+; SM20: st.global.f32 [%r{{[0-9]+}}], %f[[RED]]
+; SM30: st.global.f32 [%r{{[0-9]+}}], %f[[RED]]
store float %ret, float* %red
ret void
}
@@ -34,8 +34,8 @@ define void @bar(float* %red, i32 %idx) {
; SM30: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [%rd[[TEXHANDLE]], {%r{{[0-9]+}}}]
%val = tail call { float, float, float, float } @llvm.nvvm.tex.unified.1d.v4f32.s32(i64 %texHandle, i32 %idx)
%ret = extractvalue { float, float, float, float } %val, 0
-; SM20: st.f32 [%r{{[0-9]+}}], %f[[RED]]
-; SM30: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+; SM20: st.global.f32 [%r{{[0-9]+}}], %f[[RED]]
+; SM30: st.global.f32 [%r{{[0-9]+}}], %f[[RED]]
store float %ret, float* %red
ret void
}
diff --git a/test/CodeGen/PowerPC/fma.ll b/test/CodeGen/PowerPC/fma.ll
index ab5251b2a554f..9cfef398edfd2 100644
--- a/test/CodeGen/PowerPC/fma.ll
+++ b/test/CodeGen/PowerPC/fma.ll
@@ -1,9 +1,12 @@
; RUN: llc < %s -march=ppc32 -fp-contract=fast -mattr=-vsx | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -fp-contract=fast -mattr=+vsx -mcpu=pwr7 | FileCheck -check-prefix=CHECK-VSX %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -fp-contract=fast -mcpu=pwr8 | FileCheck -check-prefix=CHECK-P8 %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -fp-contract=fast -mcpu=pwr8 | FileCheck -check-prefix=CHECK-P8 %s
declare double @dummy1(double) #0
declare double @dummy2(double, double) #0
declare double @dummy3(double, double, double) #0
+declare float @dummy4(float, float) #0
define double @test_FMADD1(double %A, double %B, double %C) {
%D = fmul double %A, %B ; <double> [#uses=1]
@@ -126,3 +129,83 @@ define float @test_FNMSUBS(float %A, float %B, float %C) {
; CHECK-VSX: fnmsubs
; CHECK-VSX-NEXT: blr
}
+
+define float @test_XSMADDMSP(float %A, float %B, float %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fadd float %C, %D ; <float> [#uses=1]
+ ret float %E
+; CHECK-P8-LABEL: test_XSMADDMSP:
+; CHECK-P8: xsmaddmsp
+; CHECK-P8-NEXT: blr
+}
+
+define float @test_XSMSUBMSP(float %A, float %B, float %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fsub float %D, %C ; <float> [#uses=1]
+ ret float %E
+; CHECK-P8-LABEL: test_XSMSUBMSP:
+; CHECK-P8: xsmsubmsp
+; CHECK-P8-NEXT: blr
+}
+
+define float @test_XSMADDASP(float %A, float %B, float %C, float %D) {
+ %E = fmul float %A, %B ; <float> [#uses=2]
+ %F = fadd float %E, %C ; <float> [#uses=1]
+ %G = fsub float %E, %D ; <float> [#uses=1]
+ %H = call float @dummy4(float %F, float %G) ; <float> [#uses=1]
+ ret float %H
+; CHECK-P8-LABEL: test_XSMADDASP:
+; CHECK-P8: xsmaddasp
+; CHECK-P8-NEXT: xsmsubmsp
+}
+
+define float @test_XSMSUBASP(float %A, float %B, float %C, float %D) {
+ %E = fmul float %A, %B ; <float> [#uses=2]
+ %F = fsub float %E, %C ; <float> [#uses=1]
+ %G = fsub float %E, %D ; <float> [#uses=1]
+ %H = call float @dummy4(float %F, float %G) ; <float> [#uses=1]
+ ret float %H
+; CHECK-P8-LABEL: test_XSMSUBASP:
+; CHECK-P8: xsmsubasp
+; CHECK-P8-NEXT: xsmsubmsp
+}
+
+define float @test_XSNMADDMSP(float %A, float %B, float %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fadd float %D, %C ; <float> [#uses=1]
+ %F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
+ ret float %F
+; CHECK-P8-LABEL: test_XSNMADDMSP:
+; CHECK-P8: xsnmaddmsp
+; CHECK-P8-NEXT: blr
+}
+
+define float @test_XSNMSUBMSP(float %A, float %B, float %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fsub float %D, %C ; <float> [#uses=1]
+ %F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
+ ret float %F
+; CHECK-P8-LABEL: test_XSNMSUBMSP:
+; CHECK-P8: xsnmsubmsp
+; CHECK-P8-NEXT: blr
+}
+
+define float @test_XSNMADDASP(float %A, float %B, float %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fadd float %D, %C ; <float> [#uses=1]
+ %F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
+ %H = call float @dummy4(float %E, float %F) ; <float> [#uses=1]
+ ret float %F
+; CHECK-P8-LABEL: test_XSNMADDASP:
+; CHECK-P8: xsnmaddasp
+}
+
+define float @test_XSNMSUBASP(float %A, float %B, float %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fsub float %D, %C ; <float> [#uses=1]
+ %F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
+ %H = call float @dummy4(float %E, float %F) ; <float> [#uses=1]
+ ret float %F
+; CHECK-P8-LABEL: test_XSNMSUBASP:
+; CHECK-P8: xsnmsubasp
+}
diff --git a/test/CodeGen/PowerPC/vsx-fma-sp.ll b/test/CodeGen/PowerPC/vsx-fma-sp.ll
new file mode 100644
index 0000000000000..1c3e457f92cb8
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-fma-sp.ll
@@ -0,0 +1,167 @@
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=+vsx | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=+vsx -fast-isel -O0 | FileCheck -check-prefix=CHECK-FISL %s
+define void @test1sp(float %a, float %b, float %c, float %e, float* nocapture %d) #0 {
+entry:
+ %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
+ store float %0, float* %d, align 4
+ %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
+ %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
+ store float %1, float* %arrayidx1, align 4
+ ret void
+
+; CHECK-LABEL: @test1sp
+; CHECK-DAG: li [[C1:[0-9]+]], 4
+; CHECK-DAG: xsmaddmsp 3, 2, 1
+; CHECK-DAG: xsmaddasp 1, 2, 4
+; CHECK-DAG: stxsspx 3, 0, 7
+; CHECK-DAG: stxsspx 1, 7, [[C1]]
+; CHECK: blr
+
+; CHECK-FISL-LABEL: @test1sp
+; CHECK-FISL-DAG: fmr 0, 1
+; CHECK-FISL-DAG: xsmaddasp 0, 2, 3
+; CHECK-FISL-DAG: stxsspx 0, 0, 7
+; CHECK-FISL-DAG: xsmaddasp 1, 2, 4
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 4
+; CHECK-FISL-DAG: stxsspx 1, 7, [[C1]]
+; CHECK-FISL: blr
+}
+
+define void @test2sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+entry:
+ %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
+ store float %0, float* %d, align 4
+ %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
+ %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
+ store float %1, float* %arrayidx1, align 4
+ %2 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
+ %arrayidx2 = getelementptr inbounds float, float* %d, i64 2
+ store float %2, float* %arrayidx2, align 4
+ ret void
+
+; CHECK-LABEL: @test2sp
+; CHECK-DAG: li [[C1:[0-9]+]], 4
+; CHECK-DAG: li [[C2:[0-9]+]], 8
+; CHECK-DAG: xsmaddmsp 3, 2, 1
+; CHECK-DAG: xsmaddmsp 4, 2, 1
+; CHECK-DAG: xsmaddasp 1, 2, 5
+; CHECK-DAG: stxsspx 3, 0, 8
+; CHECK-DAG: stxsspx 4, 8, [[C1]]
+; CHECK-DAG: stxsspx 1, 8, [[C2]]
+; CHECK: blr
+
+; CHECK-FISL-LABEL: @test2sp
+; CHECK-FISL-DAG: fmr 0, 1
+; CHECK-FISL-DAG: xsmaddasp 0, 2, 3
+; CHECK-FISL-DAG: stxsspx 0, 0, 8
+; CHECK-FISL-DAG: fmr 0, 1
+; CHECK-FISL-DAG: xsmaddasp 0, 2, 4
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 4
+; CHECK-FISL-DAG: stxsspx 0, 8, [[C1]]
+; CHECK-FISL-DAG: xsmaddasp 1, 2, 5
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsspx 1, 8, [[C2]]
+; CHECK-FISL: blr
+}
+
+define void @test3sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+entry:
+ %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
+ store float %0, float* %d, align 4
+ %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
+ %2 = tail call float @llvm.fma.f32(float %b, float %c, float %1)
+ %arrayidx1 = getelementptr inbounds float, float* %d, i64 3
+ store float %2, float* %arrayidx1, align 4
+ %3 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
+ %arrayidx2 = getelementptr inbounds float, float* %d, i64 2
+ store float %3, float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, float* %d, i64 1
+ store float %1, float* %arrayidx3, align 4
+ ret void
+
+; CHECK-LABEL: @test3sp
+; CHECK-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-DAG: li [[C1:[0-9]+]], 12
+; CHECK-DAG: li [[C2:[0-9]+]], 8
+; CHECK-DAG: li [[C3:[0-9]+]], 4
+; CHECK-DAG: xsmaddmsp 4, 2, 1
+; CHECK-DAG: xsmaddasp 1, 2, 5
+
+; Note: We could convert this next FMA to M-type as well, but it would require
+; re-ordering the instructions.
+; CHECK-DAG: xsmaddasp [[F1]], 2, 3
+
+; CHECK-DAG: xsmaddmsp 3, 2, 4
+; CHECK-DAG: stxsspx [[F1]], 0, 8
+; CHECK-DAG: stxsspx 3, 8, [[C1]]
+; CHECK-DAG: stxsspx 1, 8, [[C2]]
+; CHECK-DAG: stxsspx 4, 8, [[C3]]
+; CHECK: blr
+
+; CHECK-FISL-LABEL: @test3sp
+; CHECK-FISL-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-FISL-DAG: xsmaddasp [[F1]], 2, 4
+; CHECK-FISL-DAG: fmr 4, [[F1]]
+; CHECK-FISL-DAG: xsmaddasp 4, 2, 3
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 12
+; CHECK-FISL-DAG: stxsspx 4, 8, [[C1]]
+; CHECK-FISL-DAG: xsmaddasp 1, 2, 5
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsspx 1, 8, [[C2]]
+; CHECK-FISL-DAG: li [[C3:[0-9]+]], 4
+; CHECK-FISL-DAG: stxsspx 0, 8, [[C3]]
+; CHECK-FISL: blr
+}
+
+define void @test4sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+entry:
+ %0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
+ store float %0, float* %d, align 4
+ %1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
+ %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
+ store float %1, float* %arrayidx1, align 4
+ %2 = tail call float @llvm.fma.f32(float %b, float %c, float %1)
+ %arrayidx3 = getelementptr inbounds float, float* %d, i64 3
+ store float %2, float* %arrayidx3, align 4
+ %3 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
+ %arrayidx4 = getelementptr inbounds float, float* %d, i64 2
+ store float %3, float* %arrayidx4, align 4
+ ret void
+
+; CHECK-LABEL: @test4sp
+; CHECK-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-DAG: li [[C1:[0-9]+]], 4
+; CHECK-DAG: li [[C2:[0-9]+]], 8
+; CHECK-DAG: xsmaddmsp 4, 2, 1
+
+; Note: We could convert this next FMA to M-type as well, but it would require
+; re-ordering the instructions.
+; CHECK-DAG: xsmaddasp 1, 2, 5
+
+; CHECK-DAG: xsmaddasp [[F1]], 2, 3
+; CHECK-DAG: stxsspx [[F1]], 0, 8
+; CHECK-DAG: stxsspx 4, 8, [[C1]]
+; CHECK-DAG: li [[C3:[0-9]+]], 12
+; CHECK-DAG: xsmaddasp 4, 2, 3
+; CHECK-DAG: stxsspx 4, 8, [[C3]]
+; CHECK-DAG: stxsspx 1, 8, [[C2]]
+; CHECK: blr
+
+; CHECK-FISL-LABEL: @test4sp
+; CHECK-FISL-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-FISL-DAG: xsmaddasp [[F1]], 2, 3
+; CHECK-FISL-DAG: stxsspx 0, 0, 8
+; CHECK-FISL-DAG: fmr [[F1]], 1
+; CHECK-FISL-DAG: xsmaddasp [[F1]], 2, 4
+; CHECK-FISL-DAG: li [[C3:[0-9]+]], 4
+; CHECK-FISL-DAG: stxsspx 0, 8, [[C3]]
+; CHECK-FISL-DAG: xsmaddasp 0, 2, 3
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 12
+; CHECK-FISL-DAG: stxsspx 0, 8, [[C1]]
+; CHECK-FISL-DAG: xsmaddasp 1, 2, 5
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsspx 1, 8, [[C2]]
+; CHECK-FISL: blr
+}
+
+declare float @llvm.fma.f32(float, float, float) #0
diff --git a/test/CodeGen/R600/cgp-addressing-modes.ll b/test/CodeGen/R600/cgp-addressing-modes.ll
new file mode 100644
index 0000000000000..3d36bd19937e4
--- /dev/null
+++ b/test/CodeGen/R600/cgp-addressing-modes.ll
@@ -0,0 +1,242 @@
+; RUN: opt -S -codegenprepare -mtriple=amdgcn-unknown-unknown < %s | FileCheck -check-prefix=OPT %s
+; RUN: llc -march=amdgcn -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN %s
+
+declare i32 @llvm.r600.read.tidig.x() #0
+
+; OPT-LABEL: @test_sink_global_small_offset_i32(
+; OPT-NOT: getelementptr i32, i32 addrspace(1)* %in
+; OPT: br i1
+; OPT: ptrtoint
+
+; GCN-LABEL: {{^}}test_sink_global_small_offset_i32:
+; GCN: {{^}}BB0_2:
+define void @test_sink_global_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i64 7
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = load i32, i32 addrspace(1)* %in.gep
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_sink_global_small_max_i32_ds_offset(
+; OPT: %in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 65535
+; OPT: br i1
+
+; GCN-LABEL: {{^}}test_sink_global_small_max_i32_ds_offset:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_load_sbyte {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
+; GCN: {{^}}BB1_2:
+; GCN: s_or_b64 exec
+define void @test_sink_global_small_max_i32_ds_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in, i32 %cond) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 99999
+ %in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 65535
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = load i8, i8 addrspace(1)* %in.gep
+ %tmp2 = sext i8 %tmp1 to i32
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp2, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_sink_global_small_max_mubuf_offset:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_load_sbyte {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:4095{{$}}
+; GCN: {{^}}BB2_2:
+; GCN: s_or_b64 exec
+define void @test_sink_global_small_max_mubuf_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in, i32 %cond) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 1024
+ %in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 4095
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = load i8, i8 addrspace(1)* %in.gep
+ %tmp2 = sext i8 %tmp1 to i32
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp2, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_sink_global_small_max_plus_1_mubuf_offset:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_load_sbyte {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
+; GCN: {{^}}BB3_2:
+; GCN: s_or_b64 exec
+define void @test_sink_global_small_max_plus_1_mubuf_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in, i32 %cond) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 99999
+ %in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 4096
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = load i8, i8 addrspace(1)* %in.gep
+ %tmp2 = sext i8 %tmp1 to i32
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp2, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_no_sink_flat_small_offset_i32(
+; OPT: getelementptr i32, i32 addrspace(4)* %in
+; OPT: br i1
+; OPT-NOT: ptrtoint
+
+; GCN-LABEL: {{^}}test_no_sink_flat_small_offset_i32:
+; GCN: flat_load_dword
+; GCN: {{^}}BB4_2:
+
+define void @test_no_sink_flat_small_offset_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(4)* %out, i64 999999
+ %in.gep = getelementptr i32, i32 addrspace(4)* %in, i64 7
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = load i32, i32 addrspace(4)* %in.gep
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(4)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_sink_scratch_small_offset_i32(
+; OPT-NOT: getelementptr [512 x i32]
+; OPT: br i1
+; OPT: ptrtoint
+
+; GCN-LABEL: {{^}}test_sink_scratch_small_offset_i32:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:4092{{$}}
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:4092{{$}}
+; GCN: {{^}}BB5_2:
+define void @test_sink_scratch_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond, i32 %arg) {
+entry:
+ %alloca = alloca [512 x i32], align 4
+ %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i64 999998
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i64 999999
+ %add.arg = add i32 %arg, 8
+ %alloca.gep = getelementptr [512 x i32], [512 x i32]* %alloca, i32 0, i32 1023
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ store volatile i32 123, i32* %alloca.gep
+ %tmp1 = load volatile i32, i32* %alloca.gep
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep.0
+ %load = load volatile i32, i32* %alloca.gep
+ store i32 %load, i32 addrspace(1)* %out.gep.1
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_no_sink_scratch_large_offset_i32(
+; OPT: %alloca.gep = getelementptr [512 x i32], [512 x i32]* %alloca, i32 0, i32 1024
+; OPT: br i1
+; OPT-NOT: ptrtoint
+
+; GCN-LABEL: {{^}}test_no_sink_scratch_large_offset_i32:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN: {{^}}BB6_2:
+define void @test_no_sink_scratch_large_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond, i32 %arg) {
+entry:
+ %alloca = alloca [512 x i32], align 4
+ %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i64 999998
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i64 999999
+ %add.arg = add i32 %arg, 8
+ %alloca.gep = getelementptr [512 x i32], [512 x i32]* %alloca, i32 0, i32 1024
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ store volatile i32 123, i32* %alloca.gep
+ %tmp1 = load volatile i32, i32* %alloca.gep
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep.0
+ %load = load volatile i32, i32* %alloca.gep
+ store i32 %load, i32 addrspace(1)* %out.gep.1
+ br label %done
+
+done:
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_sink_global_vreg_sreg_i32:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN: {{^}}BB7_2:
+define void @test_sink_global_vreg_sreg_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset, i32 %cond) {
+entry:
+ %offset.ext = zext i32 %offset to i64
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
+ %in.gep = getelementptr i32, i32 addrspace(1)* %in, i64 %offset.ext
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = load i32, i32 addrspace(1)* %in.gep
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/coalescer_remat.ll b/test/CodeGen/R600/coalescer_remat.ll
index f78a77b361540..96730bcf2e8ff 100644
--- a/test/CodeGen/R600/coalescer_remat.ll
+++ b/test/CodeGen/R600/coalescer_remat.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -o - %s | FileCheck %s
-target triple="amdgcn--"
+; RUN: llc -march=amdgcn -verify-machineinstrs -mtriple=amdgcn-- -o - %s | FileCheck %s
declare float @llvm.fma.f32(float, float, float)
@@ -12,7 +11,8 @@ declare float @llvm.fma.f32(float, float, float)
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0
-; CHECK: ; NumVgprs: 12
+; It's probably OK if this is slightly higher:
+; CHECK: ; NumVgprs: 9
define void @foobar(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %flag) {
entry:
%cmpflag = icmp eq i32 %flag, 1
diff --git a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
index 0aecc189e0bf7..5851720926762 100644
--- a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
+++ b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
@@ -1,12 +1,10 @@
-; RUN: opt -codegenprepare -S -o - %s | FileCheck --check-prefix=OPT %s
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC %s
+; RUN: opt -mtriple=amdgcn-- -codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI-LLC %s
-target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
-target triple = "r600--"
-
-; OPT-LABEL: @test
+; OPT-LABEL: @test(
; OPT: mul nsw i32
; OPT-NEXT: sext
+
; SI-LLC-LABEL: {{^}}test:
; SI-LLC: s_mul_i32
; SI-LLC-NOT: mul
diff --git a/test/CodeGen/R600/fmul.ll b/test/CodeGen/R600/fmul.ll
index 68ebc4dedfe05..addc409c9eb15 100644
--- a/test/CodeGen/R600/fmul.ll
+++ b/test/CodeGen/R600/fmul.ll
@@ -73,4 +73,20 @@ define void @test_mul_2_k_inv(float addrspace(1)* %out, float %x) #0 {
ret void
}
+; There should be three multiplies here; %a should be used twice (once
+; negated), not duplicated into mul x, 5.0 and mul x, -5.0.
+; FUNC-LABEL: {{^}}test_mul_twouse:
+; SI: v_mul_f32
+; SI: v_mul_f32
+; SI: v_mul_f32
+; SI-NOT: v_mul_f32
+define void @test_mul_twouse(float addrspace(1)* %out, float %x, float %y) #0 {
+ %a = fmul float %x, 5.0
+ %b = fsub float -0.0, %a
+ %c = fmul float %b, %y
+ %d = fmul float %c, %a
+ store float %d, float addrspace(1)* %out
+ ret void
+}
+
attributes #0 = { "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }
diff --git a/test/CodeGen/R600/half.ll b/test/CodeGen/R600/half.ll
index 42ee788e88d5b..bf8f11860b50d 100644
--- a/test/CodeGen/R600/half.ll
+++ b/test/CodeGen/R600/half.ll
@@ -1,62 +1,525 @@
-; RUN: llc < %s -march=amdgcn -mcpu=SI | FileCheck %s
-; RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
-; CHECK-LABEL: {{^}}test_load_store:
-; CHECK: buffer_load_ushort [[TMP:v[0-9]+]]
-; CHECK: buffer_store_short [[TMP]]
+; half args should be promoted to float, so storing a half argument takes an
+; f32-to-f16 conversion before the buffer_store_short.
+
+; GCN-LABEL: {{^}}load_f16_arg:
+; GCN: s_load_dword [[ARG:s[0-9]+]]
+; GCN: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[ARG]]
+; GCN: buffer_store_short [[CVT]]
+define void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
+ store half %arg, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}load_v2f16_arg:
+; GCN-DAG: buffer_load_ushort [[V0:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44
+; GCN-DAG: buffer_load_ushort [[V1:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:46
+; GCN-DAG: buffer_store_short [[V0]], s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; GCN-DAG: buffer_store_short [[V1]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
+; GCN: s_endpgm
+define void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
+ store <2 x half> %arg, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}load_v3f16_arg:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN-NOT: buffer_load
+; GCN-DAG: buffer_store_dword
+; GCN-DAG: buffer_store_short
+; GCN-NOT: buffer_store
+; GCN: s_endpgm
+define void @load_v3f16_arg(<3 x half> addrspace(1)* %out, <3 x half> %arg) #0 {
+ store <3 x half> %arg, <3 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}load_v4f16_arg:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: s_endpgm
+define void @load_v4f16_arg(<4 x half> addrspace(1)* %out, <4 x half> %arg) #0 {
+ store <4 x half> %arg, <4 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}load_v8f16_arg:
+define void @load_v8f16_arg(<8 x half> addrspace(1)* %out, <8 x half> %arg) #0 {
+ store <8 x half> %arg, <8 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v2f16_arg:
+define void @extload_v2f16_arg(<2 x float> addrspace(1)* %out, <2 x half> %in) #0 {
+ %fpext = fpext <2 x half> %in to <2 x float>
+ store <2 x float> %fpext, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_f16_to_f32_arg:
+define void @extload_f16_to_f32_arg(float addrspace(1)* %out, half %arg) #0 {
+ %ext = fpext half %arg to float
+ store float %ext, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v2f16_to_v2f32_arg:
+define void @extload_v2f16_to_v2f32_arg(<2 x float> addrspace(1)* %out, <2 x half> %arg) #0 {
+ %ext = fpext <2 x half> %arg to <2 x float>
+ store <2 x float> %ext, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v3f16_to_v3f32_arg:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN-NOT: buffer_load
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_f32_f16_e32
+; GCN-NOT: v_cvt_f32_f16
+; GCN-DAG: buffer_store_dword
+; GCN-DAG: buffer_store_dwordx2
+; GCN: s_endpgm
+define void @extload_v3f16_to_v3f32_arg(<3 x float> addrspace(1)* %out, <3 x half> %arg) #0 {
+ %ext = fpext <3 x half> %arg to <3 x float>
+ store <3 x float> %ext, <3 x float> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v4f16_to_v4f32_arg:
+define void @extload_v4f16_to_v4f32_arg(<4 x float> addrspace(1)* %out, <4 x half> %arg) #0 {
+ %ext = fpext <4 x half> %arg to <4 x float>
+ store <4 x float> %ext, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v8f16_to_v8f32_arg:
+define void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x half> %arg) #0 {
+ %ext = fpext <8 x half> %arg to <8 x float>
+ store <8 x float> %ext, <8 x float> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_f16_to_f64_arg:
+define void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
+ %ext = fpext half %arg to double
+ store double %ext, double addrspace(1)* %out
+ ret void
+}
+; GCN-LABEL: {{^}}extload_v2f16_to_v2f64_arg:
+define void @extload_v2f16_to_v2f64_arg(<2 x double> addrspace(1)* %out, <2 x half> %arg) #0 {
+ %ext = fpext <2 x half> %arg to <2 x double>
+ store <2 x double> %ext, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v3f16_to_v3f64_arg:
+define void @extload_v3f16_to_v3f64_arg(<3 x double> addrspace(1)* %out, <3 x half> %arg) #0 {
+ %ext = fpext <3 x half> %arg to <3 x double>
+ store <3 x double> %ext, <3 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v4f16_to_v4f64_arg:
+define void @extload_v4f16_to_v4f64_arg(<4 x double> addrspace(1)* %out, <4 x half> %arg) #0 {
+ %ext = fpext <4 x half> %arg to <4 x double>
+ store <4 x double> %ext, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}extload_v8f16_to_v8f64_arg:
+define void @extload_v8f16_to_v8f64_arg(<8 x double> addrspace(1)* %out, <8 x half> %arg) #0 {
+ %ext = fpext <8 x half> %arg to <8 x double>
+ store <8 x double> %ext, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_load_store_f16:
+; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
+; GCN: buffer_store_short [[TMP]]
+define void @global_load_store_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%val = load half, half addrspace(1)* %in
- store half %val, half addrspace(1) * %out
+ store half %val, half addrspace(1)* %out
ret void
}
-define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) {
-; CHECK-LABEL: {{^}}test_bitcast_from_half:
-; CHECK: buffer_load_ushort [[TMP:v[0-9]+]]
-; CHECK: buffer_store_short [[TMP]]
- %val = load half, half addrspace(1) * %in
- %val_int = bitcast half %val to i16
- store i16 %val_int, i16 addrspace(1)* %out
+; GCN-LABEL: {{^}}global_load_store_v2f16:
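+; <2 x half> is 32 bits wide, so the value round-trips through a single dword.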
+; GCN: buffer_load_dword [[TMP:v[0-9]+]]
+; GCN: buffer_store_dword [[TMP]]
+define void @global_load_store_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ store <2 x half> %val, <2 x half> addrspace(1)* %out
ret void
}
-define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) {
-; CHECK-LABEL: {{^}}test_bitcast_to_half:
-; CHECK: buffer_load_ushort [[TMP:v[0-9]+]]
-; CHECK: buffer_store_short [[TMP]]
- %val = load i16, i16 addrspace(1)* %in
- %val_fp = bitcast i16 %val to half
- store half %val_fp, half addrspace(1)* %out
+; GCN-LABEL: {{^}}global_load_store_v4f16:
+; GCN: buffer_load_dwordx2 [[TMP:v\[[0-9]+:[0-9]+\]]]
+; GCN: buffer_store_dwordx2 [[TMP]]
+define void @global_load_store_v4f16(<4 x half> addrspace(1)* %in, <4 x half> addrspace(1)* %out) #0 {
+ %val = load <4 x half>, <4 x half> addrspace(1)* %in
+ store <4 x half> %val, <4 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_load_store_v8f16:
+; GCN: buffer_load_dwordx4 [[TMP:v\[[0-9]+:[0-9]+\]]]
+; GCN: buffer_store_dwordx4 [[TMP]]
+; GCN: s_endpgm
+define void @global_load_store_v8f16(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
+ %val = load <8 x half>, <8 x half> addrspace(1)* %in
+ store <8 x half> %val, <8 x half> addrspace(1)* %out
ret void
}
-define void @test_extend32(half addrspace(1)* %in, float addrspace(1)* %out) {
-; CHECK-LABEL: {{^}}test_extend32:
-; CHECK: v_cvt_f32_f16_e32
+; GCN-LABEL: {{^}}global_extload_f16_to_f32:
+; GCN: buffer_load_ushort [[LOAD:v[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[LOAD]]
+; GCN: buffer_store_dword [[CVT]]
+define void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %in) #0 {
+ %val = load half, half addrspace(1)* %in
+ %cvt = fpext half %val to float
+ store float %cvt, float addrspace(1)* %out
+ ret void
+}
- %val16 = load half, half addrspace(1)* %in
- %val32 = fpext half %val16 to float
- store float %val32, float addrspace(1)* %out
+; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f32:
+define void @global_extload_v2f16_to_v2f32(<2 x float> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %cvt = fpext <2 x half> %val to <2 x float>
+ store <2 x float> %cvt, <2 x float> addrspace(1)* %out
ret void
}
-define void @test_extend64(half addrspace(1)* %in, double addrspace(1)* %out) {
-; CHECK-LABEL: {{^}}test_extend64:
-; CHECK: v_cvt_f32_f16_e32
-; CHECK: v_cvt_f64_f32_e32
+; GCN-LABEL: {{^}}global_extload_v3f16_to_v3f32:
+define void @global_extload_v3f16_to_v3f32(<3 x float> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
+ %val = load <3 x half>, <3 x half> addrspace(1)* %in
+ %cvt = fpext <3 x half> %val to <3 x float>
+ store <3 x float> %cvt, <3 x float> addrspace(1)* %out
+ ret void
+}
- %val16 = load half, half addrspace(1)* %in
- %val64 = fpext half %val16 to double
- store double %val64, double addrspace(1)* %out
+; GCN-LABEL: {{^}}global_extload_v4f16_to_v4f32:
+define void @global_extload_v4f16_to_v4f32(<4 x float> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
+ %val = load <4 x half>, <4 x half> addrspace(1)* %in
+ %cvt = fpext <4 x half> %val to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out
ret void
}
-define void @test_trunc32(float addrspace(1)* %in, half addrspace(1)* %out) {
-; CHECK-LABEL: {{^}}test_trunc32:
-; CHECK: v_cvt_f16_f32_e32
+; GCN-LABEL: {{^}}global_extload_v8f16_to_v8f32:
+define void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
+ %val = load <8 x half>, <8 x half> addrspace(1)* %in
+ %cvt = fpext <8 x half> %val to <8 x float>
+ store <8 x float> %cvt, <8 x float> addrspace(1)* %out
+ ret void
+}
- %val32 = load float, float addrspace(1)* %in
- %val16 = fptrunc float %val32 to half
- store half %val16, half addrspace(1)* %out
+; GCN-LABEL: {{^}}global_extload_v16f16_to_v16f32:
+define void @global_extload_v16f16_to_v16f32(<16 x float> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
+ %val = load <16 x half>, <16 x half> addrspace(1)* %in
+ %cvt = fpext <16 x half> %val to <16 x float>
+ store <16 x float> %cvt, <16 x float> addrspace(1)* %out
ret void
}
+
+; GCN-LABEL: {{^}}global_extload_f16_to_f64:
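+; There is no direct f16-to-f64 conversion, so the extend goes through f32.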
+; GCN: buffer_load_ushort [[LOAD:v[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[LOAD]]
+; GCN: v_cvt_f64_f32_e32 [[CVT1:v\[[0-9]+:[0-9]+\]]], [[CVT0]]
+; GCN: buffer_store_dwordx2 [[CVT1]]
+define void @global_extload_f16_to_f64(double addrspace(1)* %out, half addrspace(1)* %in) #0 {
+ %val = load half, half addrspace(1)* %in
+ %cvt = fpext half %val to double
+ store double %cvt, double addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f64:
+define void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %cvt = fpext <2 x half> %val to <2 x double>
+ store <2 x double> %cvt, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_extload_v3f16_to_v3f64:
+define void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
+ %val = load <3 x half>, <3 x half> addrspace(1)* %in
+ %cvt = fpext <3 x half> %val to <3 x double>
+ store <3 x double> %cvt, <3 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_extload_v4f16_to_v4f64:
+define void @global_extload_v4f16_to_v4f64(<4 x double> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
+ %val = load <4 x half>, <4 x half> addrspace(1)* %in
+ %cvt = fpext <4 x half> %val to <4 x double>
+ store <4 x double> %cvt, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_extload_v8f16_to_v8f64:
+define void @global_extload_v8f16_to_v8f64(<8 x double> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
+ %val = load <8 x half>, <8 x half> addrspace(1)* %in
+ %cvt = fpext <8 x half> %val to <8 x double>
+ store <8 x double> %cvt, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_extload_v16f16_to_v16f64:
+define void @global_extload_v16f16_to_v16f64(<16 x double> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
+ %val = load <16 x half>, <16 x half> addrspace(1)* %in
+ %cvt = fpext <16 x half> %val to <16 x double>
+ store <16 x double> %cvt, <16 x double> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_f32_to_f16:
+; GCN: buffer_load_dword [[LOAD:v[0-9]+]]
+; GCN: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[LOAD]]
+; GCN: buffer_store_short [[CVT]]
+define void @global_truncstore_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %val = load float, float addrspace(1)* %in
+ %cvt = fptrunc float %val to half
+ store half %cvt, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v2f32_to_v2f16:
+; GCN: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN-DAG: v_cvt_f16_f32_e32 [[CVT0:v[0-9]+]], v[[LO]]
+; GCN-DAG: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], v[[HI]]
+; GCN-DAG: buffer_store_short [[CVT0]]
+; GCN-DAG: buffer_store_short [[CVT1]]
+; GCN: s_endpgm
+define void @global_truncstore_v2f32_to_v2f16(<2 x half> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
+ %val = load <2 x float>, <2 x float> addrspace(1)* %in
+ %cvt = fptrunc <2 x float> %val to <2 x half>
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Shouldn't do 4th conversion
+; GCN-LABEL: {{^}}global_truncstore_v3f32_to_v3f16:
+; GCN: buffer_load_dwordx4
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: buffer_store_short
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+define void @global_truncstore_v3f32_to_v3f16(<3 x half> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
+ %val = load <3 x float>, <3 x float> addrspace(1)* %in
+ %cvt = fptrunc <3 x float> %val to <3 x half>
+ store <3 x half> %cvt, <3 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v4f32_to_v4f16:
+; GCN: buffer_load_dwordx4
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: s_endpgm
+define void @global_truncstore_v4f32_to_v4f16(<4 x half> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
+ %val = load <4 x float>, <4 x float> addrspace(1)* %in
+ %cvt = fptrunc <4 x float> %val to <4 x half>
+ store <4 x half> %cvt, <4 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v8f32_to_v8f16:
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: s_endpgm
+define void @global_truncstore_v8f32_to_v8f16(<8 x half> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
+ %val = load <8 x float>, <8 x float> addrspace(1)* %in
+ %cvt = fptrunc <8 x float> %val to <8 x half>
+ store <8 x half> %cvt, <8 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v16f32_to_v16f16:
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: s_endpgm
+define void @global_truncstore_v16f32_to_v16f16(<16 x half> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
+ %val = load <16 x float>, <16 x float> addrspace(1)* %in
+ %cvt = fptrunc <16 x float> %val to <16 x half>
+ store <16 x half> %cvt, <16 x half> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Unsafe math should fold conversions away
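+; SI has no f16 arithmetic instructions, so the operands are extended to f32,
+; added, and the result truncated back for the store.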
+; GCN-LABEL: {{^}}fadd_f16:
+; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
+; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
+; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
+; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
+; SI: v_add_f32
+; GCN: s_endpgm
+define void @fadd_f16(half addrspace(1)* %out, half %a, half %b) #0 {
+ %add = fadd half %a, %b
+ store half %add, half addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_v2f16:
+; SI: v_add_f32
+; SI: v_add_f32
+; GCN: s_endpgm
+define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %a, <2 x half> %b) #0 {
+ %add = fadd <2 x half> %a, %b
+ store <2 x half> %add, <2 x half> addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_v4f16:
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; GCN: s_endpgm
+define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
+ %b_ptr = getelementptr <4 x half>, <4 x half> addrspace(1)* %in, i32 1
+ %a = load <4 x half>, <4 x half> addrspace(1)* %in, align 16
+ %b = load <4 x half>, <4 x half> addrspace(1)* %b_ptr, align 16
+ %result = fadd <4 x half> %a, %b
+ store <4 x half> %result, <4 x half> addrspace(1)* %out, align 16
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_v8f16:
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; SI: v_add_f32
+; GCN: s_endpgm
+define void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half> %b) #0 {
+ %add = fadd <8 x half> %a, %b
+ store <8 x half> %add, <8 x half> addrspace(1)* %out, align 32
+ ret void
+}
+
+; GCN-LABEL: {{^}}fsub_f16:
+; GCN: v_subrev_f32_e32
+; GCN: s_endpgm
+define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+ %b_ptr = getelementptr half, half addrspace(1)* %in, i32 1
+ %a = load half, half addrspace(1)* %in
+ %b = load half, half addrspace(1)* %b_ptr
+ %sub = fsub half %a, %b
+ store half %sub, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_bitcast_from_half:
+; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
+; GCN: buffer_store_short [[TMP]]
+define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) #0 {
+ %val = load half, half addrspace(1)* %in
+ %val_int = bitcast half %val to i16
+ store i16 %val_int, i16 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_bitcast_to_half:
+; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
+; GCN: buffer_store_short [[TMP]]
+define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+ %val = load i16, i16 addrspace(1)* %in
+ %val_fp = bitcast i16 %val to half
+ store half %val_fp, half addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/R600/imm.ll b/test/CodeGen/R600/imm.ll
index 8917cd6dba339..12eed550eb1fe 100644
--- a/test/CodeGen/R600/imm.ll
+++ b/test/CodeGen/R600/imm.ll
@@ -36,7 +36,7 @@ define void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
; CHECK-LABEL: {{^}}store_inline_imm_neg_0.0_i32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80000000
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
store i32 -2147483648, i32 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/R600/loop-address.ll b/test/CodeGen/R600/loop-address.ll
index 7fadb8dba7b89..f60d574497dee 100644
--- a/test/CodeGen/R600/loop-address.ll
+++ b/test/CodeGen/R600/loop-address.ll
@@ -1,13 +1,10 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s
;CHECK: ALU_PUSH
;CHECK: LOOP_START_DX10 @11
;CHECK: LOOP_BREAK @10
;CHECK: POP @10
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
-target triple = "r600--"
-
define void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) #0 {
entry:
%cmp5 = icmp sgt i32 %iterations, 0
diff --git a/test/CodeGen/R600/loop-idiom.ll b/test/CodeGen/R600/loop-idiom.ll
index 810b34fed8655..5fd9806813cd6 100644
--- a/test/CodeGen/R600/loop-idiom.ll
+++ b/test/CodeGen/R600/loop-idiom.ll
@@ -2,10 +2,6 @@
; RUN: opt -basicaa -loop-idiom -S < %s -march=amdgcn -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
; RUN: opt -basicaa -loop-idiom -S < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
-target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
-target triple = "r600--"
-
-
; Make sure loop-idiom doesn't create memcpy or memset. There are no library
; implementations of these for R600.
diff --git a/test/CodeGen/R600/max.ll b/test/CodeGen/R600/max.ll
index 1aa9e68830117..fef3e2f0a21ca 100644
--- a/test/CodeGen/R600/max.ll
+++ b/test/CodeGen/R600/max.ll
@@ -115,3 +115,54 @@ define void @s_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
store i32 %val, i32 addrspace(1)* %out, align 4
ret void
}
+
+; Make sure the redundant 'and' is removed: umax of two zero-extended i16
+; values already has clear high bits, so masking with 65535 is a no-op.
+; FUNC-LABEL: {{^}}simplify_demanded_bits_test_umax_ugt_i16:
+; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: s_max_u32 [[MAX:s[0-9]+]], [[A]], [[B]]
+; SI-NEXT: v_mov_b32_e32 [[VMAX:v[0-9]+]], [[MAX]]
+; SI-NEXT: buffer_store_dword [[VMAX]]
+define void @simplify_demanded_bits_test_umax_ugt_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) nounwind {
+ %a.ext = zext i16 %a to i32
+ %b.ext = zext i16 %b to i32
+ %cmp = icmp ugt i32 %a.ext, %b.ext
+ %val = select i1 %cmp, i32 %a.ext, i32 %b.ext
+ %mask = and i32 %val, 65535
+ store i32 %mask, i32 addrspace(1)* %out
+ ret void
+}
+
+; Make sure the redundant sign_extend_inreg is removed: smax of two
+; sign-extended i16 values is already sign-extended, so the shl/ashr pair
+; folds away.
+
+; FUNC-LABEL: {{^}}simplify_demanded_bits_test_max_sgt_i16:
+; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: s_max_i32 [[MAX:s[0-9]+]], [[A]], [[B]]
+; SI-NEXT: v_mov_b32_e32 [[VMAX:v[0-9]+]], [[MAX]]
+; SI-NEXT: buffer_store_dword [[VMAX]]
+define void @simplify_demanded_bits_test_max_sgt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) nounwind {
+ %a.ext = sext i16 %a to i32
+ %b.ext = sext i16 %b to i32
+ %cmp = icmp sgt i32 %a.ext, %b.ext
+ %val = select i1 %cmp, i32 %a.ext, i32 %b.ext
+ %shl = shl i32 %val, 16
+ %sextinreg = ashr i32 %shl, 16
+ store i32 %sextinreg, i32 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Should be able to match min/max through the extends inserted by
+; legalization.
+
+; FUNC-LABEL: {{^}}s_test_imax_sge_i16:
+; SI: s_sext_i32_i16
+; SI: s_sext_i32_i16
+; SI: v_cmp_ge_i32_e32
+; SI: v_cndmask_b32
+define void @s_test_imax_sge_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwind {
+ %cmp = icmp sge i16 %a, %b
+ %val = select i1 %cmp, i16 %a, i16 %b
+ store i16 %val, i16 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/min.ll b/test/CodeGen/R600/min.ll
index 275e9a7d899bf..0332d1a8e407c 100644
--- a/test/CodeGen/R600/min.ll
+++ b/test/CodeGen/R600/min.ll
@@ -136,3 +136,54 @@ define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace
store i1 %cmp, i1 addrspace(1)* %outgep1
ret void
}
+
+; Make sure the redundant 'and' is removed: umin of two zero-extended i16
+; values already has clear high bits, so masking with 65535 is a no-op.
+; FUNC-LABEL: {{^}}simplify_demanded_bits_test_umin_ult_i16:
+; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: s_min_u32 [[MIN:s[0-9]+]], [[A]], [[B]]
+; SI-NEXT: v_mov_b32_e32 [[VMIN:v[0-9]+]], [[MIN]]
+; SI-NEXT: buffer_store_dword [[VMIN]]
+define void @simplify_demanded_bits_test_umin_ult_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) nounwind {
+ %a.ext = zext i16 %a to i32
+ %b.ext = zext i16 %b to i32
+ %cmp = icmp ult i32 %a.ext, %b.ext
+ %val = select i1 %cmp, i32 %a.ext, i32 %b.ext
+ %mask = and i32 %val, 65535
+ store i32 %mask, i32 addrspace(1)* %out
+ ret void
+}
+
+; Make sure the redundant sign_extend_inreg is removed: smin of two
+; sign-extended i16 values is already sign-extended, so the shl/ashr pair
+; folds away.
+
+; FUNC-LABEL: {{^}}simplify_demanded_bits_test_min_slt_i16:
+; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: s_min_i32 [[MIN:s[0-9]+]], [[A]], [[B]]
+; SI-NEXT: v_mov_b32_e32 [[VMIN:v[0-9]+]], [[MIN]]
+; SI-NEXT: buffer_store_dword [[VMIN]]
+define void @simplify_demanded_bits_test_min_slt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) nounwind {
+ %a.ext = sext i16 %a to i32
+ %b.ext = sext i16 %b to i32
+ %cmp = icmp slt i32 %a.ext, %b.ext
+ %val = select i1 %cmp, i32 %a.ext, i32 %b.ext
+ %shl = shl i32 %val, 16
+ %sextinreg = ashr i32 %shl, 16
+ store i32 %sextinreg, i32 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Should be able to match min/max through the extends inserted by
+; legalization.
+
+; FUNC-LABEL: {{^}}s_test_imin_sle_i16:
+; SI: s_sext_i32_i16
+; SI: s_sext_i32_i16
+; SI: v_cmp_le_i32_e32
+; SI: v_cndmask_b32
+define void @s_test_imin_sle_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwind {
+ %cmp = icmp sle i16 %a, %b
+ %val = select i1 %cmp, i16 %a, i16 %b
+ store i16 %val, i16 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/sext-in-reg.ll b/test/CodeGen/R600/sext-in-reg.ll
index d9ad4935968d2..5aedda2ce1a9c 100644
--- a/test/CodeGen/R600/sext-in-reg.ll
+++ b/test/CodeGen/R600/sext-in-reg.ll
@@ -450,13 +450,10 @@ define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
ret void
}
-; FIXME: The BFE should really be eliminated. I think it should happen
-; when computeKnownBitsForTargetNode is implemented for imax.
-
; FUNC-LABEL: {{^}}sext_in_reg_to_illegal_type:
; SI: buffer_load_sbyte
; SI: v_max_i32
-; SI: v_bfe_i32
+; SI-NOT: bfe
; SI: buffer_store_short
define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
%tmp5 = load i8, i8 addrspace(1)* %src, align 1
diff --git a/test/CodeGen/R600/si-vector-hang.ll b/test/CodeGen/R600/si-vector-hang.ll
index 94c47fe3c600c..bd427dd3ed468 100644
--- a/test/CodeGen/R600/si-vector-hang.ll
+++ b/test/CodeGen/R600/si-vector-hang.ll
@@ -11,10 +11,7 @@
; CHECK: buffer_store_byte
; CHECK: buffer_store_byte
; ModuleID = 'radeon'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
-target triple = "r600--"
-; Function Attrs: nounwind
define void @test_8_min_char(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture readonly %in0, i8 addrspace(1)* nocapture readonly %in1) #0 {
entry:
%0 = load i8, i8 addrspace(1)* %in0, align 1
diff --git a/test/CodeGen/R600/subreg-eliminate-dead.ll b/test/CodeGen/R600/subreg-eliminate-dead.ll
new file mode 100644
index 0000000000000..8bd995a8ecbbb
--- /dev/null
+++ b/test/CodeGen/R600/subreg-eliminate-dead.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple=amdgcn-- -verify-machineinstrs -o - %s | FileCheck %s
+; LiveRangeEdit::eliminateDeadDef did not update LiveInterval sub ranges
+; properly.
+
+; Just make sure this test doesn't crash.
+; CHECK-LABEL: foobar:
+; CHECK: s_endpgm
+define void @foobar() {
+ %v0 = icmp eq <4 x i32> undef, <i32 0, i32 1, i32 2, i32 3>
+ %v3 = sext <4 x i1> %v0 to <4 x i32>
+ %v4 = extractelement <4 x i32> %v3, i32 1
+ %v5 = icmp ne i32 %v4, 0
+ %v6 = select i1 %v5, i32 undef, i32 0
+ %v15 = insertelement <2 x i32> undef, i32 %v6, i32 1
+ store <2 x i32> %v15, <2 x i32> addrspace(1)* undef, align 8
+ ret void
+}
+
+declare double @llvm.fma.f64(double, double, double)
diff --git a/test/CodeGen/R600/trunc-store-f64-to-f16.ll b/test/CodeGen/R600/trunc-store-f64-to-f16.ll
new file mode 100644
index 0000000000000..c29872beef861
--- /dev/null
+++ b/test/CodeGen/R600/trunc-store-f64-to-f16.ll
@@ -0,0 +1,56 @@
+; XFAIL: *
+; RUN: llc -march=amdgcn -mcpu=SI < %s
+
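+; f64-to-f16 truncating stores cannot be selected yet, hence the XFAIL above.
+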
+; GCN-LABEL: {{^}}global_truncstore_f64_to_f16:
+; GCN: s_endpgm
+define void @global_truncstore_f64_to_f16(half addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %val = load double, double addrspace(1)* %in
+ %cvt = fptrunc double %val to half
+ store half %cvt, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v2f64_to_v2f16:
+; GCN: s_endpgm
+define void @global_truncstore_v2f64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
+ %val = load <2 x double>, <2 x double> addrspace(1)* %in
+ %cvt = fptrunc <2 x double> %val to <2 x half>
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v3f64_to_v3f16:
+; GCN: s_endpgm
+define void @global_truncstore_v3f64_to_v3f16(<3 x half> addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
+ %val = load <3 x double>, <3 x double> addrspace(1)* %in
+ %cvt = fptrunc <3 x double> %val to <3 x half>
+ store <3 x half> %cvt, <3 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v4f64_to_v4f16:
+; GCN: s_endpgm
+define void @global_truncstore_v4f64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
+ %val = load <4 x double>, <4 x double> addrspace(1)* %in
+ %cvt = fptrunc <4 x double> %val to <4 x half>
+ store <4 x half> %cvt, <4 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v8f64_to_v8f16:
+; GCN: s_endpgm
+define void @global_truncstore_v8f64_to_v8f16(<8 x half> addrspace(1)* %out, <8 x double> addrspace(1)* %in) #0 {
+ %val = load <8 x double>, <8 x double> addrspace(1)* %in
+ %cvt = fptrunc <8 x double> %val to <8 x half>
+ store <8 x half> %cvt, <8 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}global_truncstore_v16f64_to_v16f16:
+; GCN: s_endpgm
+define void @global_truncstore_v16f64_to_v16f16(<16 x half> addrspace(1)* %out, <16 x double> addrspace(1)* %in) #0 {
+ %val = load <16 x double>, <16 x double> addrspace(1)* %in
+ %cvt = fptrunc <16 x double> %val to <16 x half>
+ store <16 x half> %cvt, <16 x half> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/unroll.ll b/test/CodeGen/R600/unroll.ll
index ca8d822ec7ede..411a15a4b839c 100644
--- a/test/CodeGen/R600/unroll.ll
+++ b/test/CodeGen/R600/unroll.ll
@@ -1,7 +1,6 @@
-; RUN: opt -loop-unroll -simplifycfg -sroa %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=amdgcn-- -loop-unroll -simplifycfg -sroa %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=r600-- -loop-unroll -simplifycfg -sroa %s -S -o - | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
-target triple = "r600--"
; This test contains a simple loop that initializes an array declared in
; private memory. We want to make sure these kinds of loops are always
diff --git a/test/CodeGen/R600/wrong-transalu-pos-fix.ll b/test/CodeGen/R600/wrong-transalu-pos-fix.ll
index 5ab465338e154..8b383e4c393db 100644
--- a/test/CodeGen/R600/wrong-transalu-pos-fix.ll
+++ b/test/CodeGen/R600/wrong-transalu-pos-fix.ll
@@ -1,14 +1,9 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=redwood -mtriple=r600-- < %s | FileCheck %s
; We want all MULLO_INT inst to be last in their instruction group
;CHECK: {{^}}fill3d:
;CHECK-NOT: MULLO_INT T[0-9]+
-; ModuleID = 'radeon'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
-target triple = "r600--"
-
-; Function Attrs: nounwind
define void @fill3d(i32 addrspace(1)* nocapture %out) #0 {
entry:
%x.i = tail call i32 @llvm.r600.read.global.size.x() #1
diff --git a/test/CodeGen/Thumb2/constant-islands-jump-table.ll b/test/CodeGen/Thumb2/constant-islands-jump-table.ll
index 0dd7092291ba5..5ffe1f9b09f6b 100644
--- a/test/CodeGen/Thumb2/constant-islands-jump-table.ll
+++ b/test/CodeGen/Thumb2/constant-islands-jump-table.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabihf -O1 %s -o - | FileCheck %s
; CHECK-LABEL: test_jump_table:
-; CHECK: b .LBB
+; CHECK: b{{.*}} .LBB
; CHECK-NOT: tbh
define i32 @test_jump_table(i32 %x, float %in) {
diff --git a/test/CodeGen/Thumb2/float-ops.ll b/test/CodeGen/Thumb2/float-ops.ll
index 7ec08f866655a..4c42908ce13b8 100644
--- a/test/CodeGen/Thumb2/float-ops.ll
+++ b/test/CodeGen/Thumb2/float-ops.ll
@@ -109,7 +109,7 @@ entry:
define double @load_d(double* %a) {
entry:
; CHECK-LABEL: load_d:
-; NONE: ldm r0, {r0, r1}
+; NONE: ldrd r0, r1, [r0]
; HARD: vldr d0, [r0]
%0 = load double, double* %a, align 8
ret double %0
@@ -127,9 +127,7 @@ entry:
define void @store_d(double* %a, double %b) {
entry:
; CHECK-LABEL: store_d:
-; NONE: mov r1, r3
-; NONE: str r2, [r0]
-; NONE: str r1, [r0, #4]
+; NONE: strd r2, r3, [r0]
; HARD: vstr d0, [r0]
store double %b, double* %a, align 8
ret void
diff --git a/test/CodeGen/Thumb2/thumb2-tbh.ll b/test/CodeGen/Thumb2/thumb2-tbh.ll
index a5a5ed0c8da26..0761ed589a266 100644
--- a/test/CodeGen/Thumb2/thumb2-tbh.ll
+++ b/test/CodeGen/Thumb2/thumb2-tbh.ll
@@ -14,9 +14,19 @@ declare void @Z_fatal(i8*) noreturn nounwind
declare noalias i8* @calloc(i32, i32) nounwind
+; Jump tables are not anchored next to the TBB/TBH any more. Make sure the
+; correct address is still calculated (i.e. via a PC-relative symbol *at* the
+; TBB/TBH).
define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
; CHECK-LABEL: main:
-; CHECK: tbb
+; CHECK-NOT: adr {{r[0-9]+}}, LJTI
+; CHECK: [[PCREL_ANCHOR:LCPI[0-9]+_[0-9]+]]:
+; CHECK-NEXT: tbb [pc, {{r[0-9]+}}]
+
+; CHECK: LJTI0_0:
+; CHECK-NEXT: .data_region jt8
+; CHECK-NEXT: .byte (LBB{{[0-9]+_[0-9]+}}-([[PCREL_ANCHOR]]+4))/2
+
entry:
br label %bb42.i
diff --git a/test/CodeGen/X86/asm-reject-reg-type-mismatch.ll b/test/CodeGen/X86/asm-reject-reg-type-mismatch.ll
new file mode 100644
index 0000000000000..016e2d261eef6
--- /dev/null
+++ b/test/CodeGen/X86/asm-reject-reg-type-mismatch.ll
@@ -0,0 +1,10 @@
+; RUN: not llc -no-integrated-as %s -o - 2> %t1
+; RUN: FileCheck %s < %t1
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64--"
+
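+; An i128 result cannot fit in the single register named by the '{ax}' constraint.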
+; CHECK: error: couldn't allocate output register for constraint '{ax}'
+define i128 @blup() {
+ %v = tail call i128 asm "", "={ax},0,~{dirflag},~{fpsr},~{flags}"(i128 0)
+ ret i128 %v
+}
diff --git a/test/CodeGen/X86/avx-vperm2x128.ll b/test/CodeGen/X86/avx-vperm2x128.ll
index 10ed079a264e8..74d20f348b529 100644
--- a/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/test/CodeGen/X86/avx-vperm2x128.ll
@@ -147,8 +147,8 @@ define <16 x i16> @E5i(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone
; AVX1-LABEL: E5i:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
-; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vmovaps (%rsi), %ymm1
+; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 8aae90c3c03db..5d99269ae1dc8 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -300,6 +300,56 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
ret <16 x i16> %shl
}
+define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
+; CHECK-LABEL: shl_32i8
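+; AVX2 has no variable-count byte shift, so each bit of the shift amount selects
+; between progressively shifted copies (by 4, 2, and 1) via vpblendvb.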
+; CHECK: vextracti128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpsllw $4, %xmm3, %xmm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; CHECK-NEXT: vpand %xmm8, %xmm2, %xmm5
+; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vpsllw $5, %xmm2, %xmm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm9 = [224,224,224,224,224,224,224,224,224,224,224,224,224,224,224,224]
+; CHECK-NEXT: vpand %xmm9, %xmm2, %xmm7
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; CHECK-NEXT: vpand %xmm7, %xmm2, %xmm4
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm4, %xmm4
+; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
+; CHECK-NEXT: vpsllw $2, %xmm3, %xmm4
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; CHECK-NEXT: vpand %xmm5, %xmm4, %xmm4
+; CHECK-NEXT: vpaddb %xmm7, %xmm7, %xmm7
+; CHECK-NEXT: vpand %xmm7, %xmm2, %xmm6
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6
+; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
+; CHECK-NEXT: vpaddb %xmm3, %xmm3, %xmm4
+; CHECK-NEXT: vpaddb %xmm7, %xmm7, %xmm6
+; CHECK-NEXT: vpand %xmm6, %xmm2, %xmm6
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6
+; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3
+; CHECK-NEXT: vpsllw $4, %xmm0, %xmm4
+; CHECK-NEXT: vpand %xmm8, %xmm4, %xmm4
+; CHECK-NEXT: vpsllw $5, %xmm1, %xmm1
+; CHECK-NEXT: vpand %xmm9, %xmm1, %xmm1
+; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm6
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6
+; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vpsllw $2, %xmm0, %xmm4
+; CHECK-NEXT: vpand %xmm5, %xmm4, %xmm4
+; CHECK-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm5
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm5, %xmm5
+; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vpaddb %xmm0, %xmm0, %xmm4
+; CHECK-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm1
+; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; CHECK-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %shl = shl <32 x i8> %r, %a
+ ret <32 x i8> %shl
+}
+
define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; CHECK-LABEL: ashr_8i16
; CHECK: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
@@ -329,6 +379,176 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
ret <16 x i16> %ashr
}
+define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
+; CHECK-LABEL: ashr_32i8
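+; There is no vector arithmetic shift for bytes, so the shift is fully
+; scalarized: every byte is extracted, shifted with sarb, and reinserted.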
+; CHECK: vextracti128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vpextrb $1, %xmm2, %ecx
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpextrb $1, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $0, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $0, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movzbl %dl, %edx
+; CHECK-NEXT: vpextrb $2, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $2, %xmm3, %esi
+; CHECK-NEXT: sarb %cl, %sil
+; CHECK-NEXT: vmovd %edx, %xmm4
+; CHECK-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %sil, %eax
+; CHECK-NEXT: vpextrb $3, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $3, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $4, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $4, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $5, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $5, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $6, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $6, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $7, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $7, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $8, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $8, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $9, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $9, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $10, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $10, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $11, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $11, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $12, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $12, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $13, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $13, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $14, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $14, %xmm3, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $15, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $15, %xmm3, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $1, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $1, %xmm0, %esi
+; CHECK-NEXT: sarb %cl, %sil
+; CHECK-NEXT: movzbl %dl, %ecx
+; CHECK-NEXT: vpinsrb $14, %ecx, %xmm4, %xmm2
+; CHECK-NEXT: vpextrb $0, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $0, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpextrb $2, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $2, %xmm0, %edi
+; CHECK-NEXT: sarb %cl, %dil
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; CHECK-NEXT: movzbl %sil, %eax
+; CHECK-NEXT: movzbl %dl, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm3
+; CHECK-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: vpextrb $3, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $3, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $4, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $4, %xmm0, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $5, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $5, %xmm0, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $6, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $6, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $7, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $7, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $8, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $8, %xmm0, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $9, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $9, %xmm0, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $10, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $10, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $11, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $11, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $12, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $12, %xmm0, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $13, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $13, %xmm0, %eax
+; CHECK-NEXT: sarb %cl, %al
+; CHECK-NEXT: vpextrb $14, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $14, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $15, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $15, %xmm0, %edx
+; CHECK-NEXT: sarb %cl, %dl
+; CHECK-NEXT: vpinsrb $14, %eax, %xmm3, %xmm0
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %ashr = ashr <32 x i8> %r, %a
+ ret <32 x i8> %ashr
+}
+
define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; CHECK-LABEL: lshr_8i16
; CHECK: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
@@ -357,3 +577,173 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
%lshr = lshr <16 x i16> %r, %a
ret <16 x i16> %lshr
}
+
+define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
+; CHECK-LABEL: lshr_32i8
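+; As with ashr_32i8, the shift is scalarized: every byte is extracted, shifted
+; with shrb, and reinserted.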
+; CHECK: vextracti128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vpextrb $1, %xmm2, %ecx
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vpextrb $1, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $0, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $0, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movzbl %dl, %edx
+; CHECK-NEXT: vpextrb $2, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $2, %xmm3, %esi
+; CHECK-NEXT: shrb %cl, %sil
+; CHECK-NEXT: vmovd %edx, %xmm4
+; CHECK-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %sil, %eax
+; CHECK-NEXT: vpextrb $3, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $3, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $4, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $4, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $5, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $5, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $6, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $6, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $7, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $7, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $8, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $8, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $9, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $9, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $10, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $10, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $11, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $11, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $12, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $12, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $13, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $13, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $14, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $14, %xmm3, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; CHECK-NEXT: vpextrb $15, %xmm2, %ecx
+; CHECK-NEXT: vpextrb $15, %xmm3, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $1, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $1, %xmm0, %esi
+; CHECK-NEXT: shrb %cl, %sil
+; CHECK-NEXT: movzbl %dl, %ecx
+; CHECK-NEXT: vpinsrb $14, %ecx, %xmm4, %xmm2
+; CHECK-NEXT: vpextrb $0, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $0, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpextrb $2, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $2, %xmm0, %edi
+; CHECK-NEXT: shrb %cl, %dil
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; CHECK-NEXT: movzbl %sil, %eax
+; CHECK-NEXT: movzbl %dl, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm3
+; CHECK-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: vpextrb $3, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $3, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $4, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $4, %xmm0, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $5, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $5, %xmm0, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $6, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $6, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $7, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $7, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $8, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $8, %xmm0, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $9, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $9, %xmm0, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $10, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $10, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $11, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $11, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $12, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $12, %xmm0, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; CHECK-NEXT: vpextrb $13, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $13, %xmm0, %eax
+; CHECK-NEXT: shrb %cl, %al
+; CHECK-NEXT: vpextrb $14, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $14, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpextrb $15, %xmm1, %ecx
+; CHECK-NEXT: vpextrb $15, %xmm0, %edx
+; CHECK-NEXT: shrb %cl, %dl
+; CHECK-NEXT: vpinsrb $14, %eax, %xmm3, %xmm0
+; CHECK-NEXT: movzbl %dl, %eax
+; CHECK-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %lshr = lshr <32 x i8> %r, %a
+ ret <32 x i8> %lshr
+}
diff --git a/test/CodeGen/X86/avx512-build-vector.ll b/test/CodeGen/X86/avx512-build-vector.ll
index 8373c6da26199..e70d9f3ad521c 100644
--- a/test/CodeGen/X86/avx512-build-vector.ll
+++ b/test/CodeGen/X86/avx512-build-vector.ll
@@ -2,13 +2,9 @@
define <16 x i32> @test1(i32* %x) {
; CHECK-LABEL: test1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0
-; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7]
-; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; CHECK-NEXT: retq
+; CHECK: vmovd (%rdi), %xmm
+; CHECK: vmovdqa32
+; CHECK: vpermt2d %zmm
%y = load i32, i32* %x, align 4
%res = insertelement <16 x i32>zeroinitializer, i32 %y, i32 4
ret <16 x i32>%res
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index 471e34cdedced..9387192f8aa44 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -98,18 +98,55 @@ define <4 x float> @test_rcp14_ss(<4 x float> %a0) {
declare <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
+ ; CHECK-LABEL: test_sqrt_pd_512
; CHECK: vsqrtpd
- %res = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4) ; <<8 x double>> [#uses=1]
+ %res = call <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
ret <8 x double> %res
}
-declare <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double>, <8 x double>, i8, i32) nounwind readnone
+declare <8 x double> @llvm.x86.avx512.mask.sqrt.pd.512(<8 x double>, <8 x double>, i8, i32) nounwind readnone
define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
+ ; CHECK-LABEL: test_sqrt_ps_512
; CHECK: vsqrtps
- %res = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4) ; <<16 x float>> [#uses=1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone
+define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) {
+ ; CHECK-LABEL: test_sqrt_round_ps_512
+ ; CHECK: vsqrtps {rz-sae}
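+ ; Rounding operand 3 selects round-toward-zero ({rz-sae}); operand 4 means
+ ; use the current rounding mode, which needs no suffix.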
+ %res = call <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 3)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone
+
+define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
+ ; CHECK-LABEL: test_getexp_pd_512
+ ; CHECK: vgetexppd
+ %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
+ ret <8 x double> %res
+}
+define <8 x double> @test_getexp_round_pd_512(<8 x double> %a0) {
+ ; CHECK-LABEL: test_getexp_round_pd_512
+ ; CHECK: vgetexppd {sae}
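+ ; Rounding operand 8 sets the SAE bit (suppress all exceptions).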
+ %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double>, <8 x double>, i8, i32) nounwind readnone
+
+define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
+ ; CHECK-LABEL: test_getexp_ps_512
+ ; CHECK: vgetexpps
+ %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_getexp_round_ps_512(<16 x float> %a0) {
+ ; CHECK-LABEL: test_getexp_round_ps_512
+ ; CHECK: vgetexpps {sae}
+ %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK: vsqrtss {{.*}}encoding: [0x62
diff --git a/test/CodeGen/X86/avx512-shuffle.ll b/test/CodeGen/X86/avx512-shuffle.ll
new file mode 100644
index 0000000000000..2683d6fe238c5
--- /dev/null
+++ b/test/CodeGen/X86/avx512-shuffle.ll
@@ -0,0 +1,336 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=CHECK-SKX
+
+; CHECK-LABEL: test1:
+; CHECK: vpermps
+; CHECK: ret
+define <16 x float> @test1(<16 x float> %a) nounwind {
+ %c = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> <i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 1>
+ ret <16 x float> %c
+}
+
+; CHECK-LABEL: test2:
+; CHECK: vpermd
+; CHECK: ret
+define <16 x i32> @test2(<16 x i32> %a) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> <i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 1>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: test3:
+; CHECK: vpermq
+; CHECK: ret
+define <8 x i64> @test3(<8 x i64> %a) nounwind {
+ %c = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 2, i32 5, i32 1, i32 undef, i32 7, i32 undef, i32 3, i32 1>
+ ret <8 x i64> %c
+}
+
+; CHECK-LABEL: test4:
+; CHECK: vpermpd
+; CHECK: ret
+define <8 x double> @test4(<8 x double> %a) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x double> %c
+}
+
+; CHECK-LABEL: test5:
+; CHECK: vpermt2pd
+; CHECK: ret
+define <8 x double> @test5(<8 x double> %a, <8 x double> %b) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ ret <8 x double> %c
+}
+
+; CHECK-LABEL: test6:
+; CHECK: vpermq $30
+; CHECK: ret
+define <8 x i64> @test6(<8 x i64> %a) nounwind {
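+; mask 2-3-1-0 = 00011110 = 0x1e = 30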
+ %c = shufflevector <8 x i64> %a, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 1, i32 0, i32 6, i32 7, i32 5, i32 4>
+ ret <8 x i64> %c
+}
+
+; CHECK-LABEL: test7:
+; CHECK: vpermt2q
+; CHECK: ret
+define <8 x i64> @test7(<8 x i64> %a, <8 x i64> %b) nounwind {
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ ret <8 x i64> %c
+}
+
+; CHECK-LABEL: test8:
+; CHECK: vpermt2d
+; CHECK: ret
+define <16 x i32> @test8(<16 x i32> %a, <16 x i32> %b) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: test9:
+; CHECK: vpermt2ps
+; CHECK: ret
+define <16 x float> @test9(<16 x float> %a, <16 x float> %b) nounwind {
+ %c = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ ret <16 x float> %c
+}
+
+; CHECK-LABEL: test10:
+; CHECK: vpermt2ps (
+; CHECK: ret
+define <16 x float> @test10(<16 x float> %a, <16 x float>* %b) nounwind {
+ %c = load <16 x float>, <16 x float>* %b
+ %d = shufflevector <16 x float> %a, <16 x float> %c, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ ret <16 x float> %d
+}
+
+; CHECK-LABEL: test11:
+; CHECK: vpermt2d
+; CHECK: ret
+define <16 x i32> @test11(<16 x i32> %a, <16 x i32>* %b) nounwind {
+ %c = load <16 x i32>, <16 x i32>* %b
+ %d = shufflevector <16 x i32> %a, <16 x i32> %c, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ ret <16 x i32> %d
+}
+
+; CHECK-LABEL: test13
+; CHECK: vpermilps $177, %zmm
+; CHECK: ret
+define <16 x float> @test13(<16 x float> %a) {
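+; mask 1-0-3-2 = 10110001 = 0xb1 = 177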
+ %b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32><i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ ret <16 x float> %b
+}
+
+; CHECK-LABEL: test14
+; CHECK: vpermilpd $203, %zmm
+; CHECK: ret
+define <8 x double> @test14(<8 x double> %a) {
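+; mask (one bit per element) 1-1-0-1-0-0-1-1 = 11001011 = 0xcb = 203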
+ %b = shufflevector <8 x double> %a, <8 x double> undef, <8 x i32><i32 1, i32 1, i32 2, i32 3, i32 4, i32 4, i32 7, i32 7>
+ ret <8 x double> %b
+}
+
+; CHECK-LABEL: test15
+; CHECK: vpshufd $177, %zmm
+; CHECK: ret
+define <16 x i32> @test15(<16 x i32> %a) {
+; mask 1-0-3-2 = 10110001 = 0xb1 = 177
+ %b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32><i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ ret <16 x i32> %b
+}
+; CHECK-LABEL: test16
+; CHECK: valignq $2, %zmm0, %zmm1
+; CHECK: ret
+define <8 x double> @test16(<8 x double> %a, <8 x double> %b) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x double> %c
+}
+
+; CHECK-LABEL: test17
+; CHECK: vshufpd $19, %zmm1, %zmm0
+; CHECK: ret
+define <8 x double> @test17(<8 x double> %a, <8 x double> %b) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 9, i32 2, i32 10, i32 5, i32 undef, i32 undef, i32 undef>
+ ret <8 x double> %c
+}
+
+; CHECK-LABEL: test18
+; CHECK: vpunpckhdq %zmm
+; CHECK: ret
+define <16 x i32> @test18(<16 x i32> %a, <16 x i32> %c) {
+ %b = shufflevector <16 x i32> %a, <16 x i32> %c, <16 x i32><i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i32> %b
+}
+
+; CHECK-LABEL: test19
+; CHECK: vpunpckldq %zmm
+; CHECK: ret
+define <16 x i32> @test19(<16 x i32> %a, <16 x i32> %c) {
+ %b = shufflevector <16 x i32> %a, <16 x i32> %c, <16 x i32><i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
+ ret <16 x i32> %b
+}
+
+; CHECK-LABEL: test20
+; CHECK: vpunpckhqdq %zmm
+; CHECK: ret
+define <8 x i64> @test20(<8 x i64> %a, <8 x i64> %c) {
+ %b = shufflevector <8 x i64> %a, <8 x i64> %c, <8 x i32><i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i64> %b
+}
+
+; CHECK-LABEL: test21
+; CHECK: vbroadcastsd %xmm0, %zmm
+; CHECK: ret
+define <8 x double> @test21(<8 x double> %a, <8 x double> %b) {
+ %shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x double> %shuffle
+}
+
+; CHECK-LABEL: test22
+; CHECK: vpbroadcastq %xmm0, %zmm
+; CHECK: ret
+define <8 x i64> @test22(<8 x i64> %a, <8 x i64> %b) {
+ %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i64> %shuffle
+}
+
+; CHECK-LABEL: @test23
+; CHECK: vshufps
+; CHECK: vshufps
+; CHECK: ret
+define <16 x i32> @test23(<16 x i32> %a, <16 x i32> %b) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: @test24
+; CHECK: vpermt2d
+; CHECK: ret
+define <16 x i32> @test24(<16 x i32> %a, <16 x i32> %b) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 25, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: @test25
+; CHECK: vshufps $52
+; CHECK: ret
+define <16 x i32> @test25(<16 x i32> %a, <16 x i32> %b) nounwind {
+; mask 0-1-3-0 = 00110100 = 0x34 = 52
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 19, i32 16, i32 4, i32 5, i32 23, i32 undef, i32 8, i32 9, i32 27, i32 undef, i32 12, i32 13, i32 undef, i32 undef>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: @test26
+; CHECK: vmovshdup
+; CHECK: ret
+define <16 x i32> @test26(<16 x i32> %a) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 undef, i32 9, i32 9, i32 undef, i32 11, i32 13, i32 undef, i32 undef, i32 undef>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: @test27
+; CHECK: ret
+define <16 x i32> @test27(<4 x i32>%a) {
+ %res = shufflevector <4 x i32> %a, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i32> %res
+}
+
+; CHECK-LABEL: test28
+; CHECK: vpshufhw $177, %ymm
+; CHECK: ret
+define <16 x i16> @test28(<16 x i16> %a) {
+ %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32><i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 13, i32 12, i32 15, i32 14>
+ ret <16 x i16> %b
+}
+
+; CHECK-LABEL: test29
+; CHECK: vunpcklps %zmm
+; CHECK: ret
+define <16 x float> @test29(<16 x float> %a, <16 x float> %c) {
+ %b = shufflevector <16 x float> %a, <16 x float> %c, <16 x i32><i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
+ ret <16 x float> %b
+}
+
+; CHECK-LABEL: @test30
+; CHECK: vshufps $144, %zmm
+; CHECK: ret
+define <16 x float> @test30(<16 x float> %a, <16 x float> %c) {
+ %b = shufflevector <16 x float> %a, <16 x float> %c, <16 x i32><i32 0, i32 0, i32 17, i32 18, i32 4, i32 4, i32 21, i32 22, i32 8, i32 8, i32 25, i32 26, i32 12, i32 12, i32 29, i32 30>
+ ret <16 x float> %b
+}
+
+; CHECK-LABEL: test31
+; CHECK: valignd $3, %zmm0, %zmm1
+; CHECK: ret
+define <16 x i32> @test31(<16 x i32> %a, <16 x i32> %b) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 3, i32 4, i32 5, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+ ret <16 x i32> %c
+}
+
+; CHECK-LABEL: test32
+; CHECK: vshufpd $99, %zmm0, %zmm1
+; CHECK: ret
+define <8 x double> @test32(<8 x double> %a, <8 x double> %b) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 9, i32 1, i32 10, i32 2, i32 undef, i32 5, i32 15, i32 undef>
+ ret <8 x double> %c
+}
+
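+; The align tests below pick a contiguous window from the concatenation
+; {%b,%a} (%a in the low half); indices 3..18 shift that concatenation right
+; by 3 elements, which maps directly onto valignd/valignq $3.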
+define <16 x i32> @test_align_v16i32_rr(<16 x i32> %a, <16 x i32> %b) nounwind {
+; CHECK-LABEL: test_align_v16i32_rr:
+; CHECK: ## BB#0:
+; CHECK-NEXT: valignd $3, %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 3, i32 4, i32 5, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+ ret <16 x i32> %c
+}
+
+define <16 x i32> @test_align_v16i32_rm(<16 x i32>* %a.ptr, <16 x i32> %b) nounwind {
+; CHECK-LABEL: test_align_v16i32_rm:
+; CHECK: ## BB#0:
+; CHECK-NEXT: valignd $3, (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %a = load <16 x i32>, <16 x i32>* %a.ptr
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 3, i32 4, i32 5, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+ ret <16 x i32> %c
+}
+
+define <16 x i32> @test_align_v16i32_rm_mask(<16 x i32>* %a.ptr, <16 x i32> %b, <16 x i1> %mask) nounwind {
+; CHECK-LABEL: test_align_v16i32_rm_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmovsxbd %xmm1, %zmm1
+; CHECK-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm1, %zmm1
+; CHECK-NEXT: vptestmd %zmm1, %zmm1, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1
+; CHECK-NEXT: valignd $3, %zmm1, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm0
+; CHECK-NEXT: retq
+;
+; CHECK-SKX-LABEL: test_align_v16i32_rm_mask:
+; CHECK-SKX: ## BB#0:
+; CHECK-SKX-NEXT: vpmovb2m %xmm1, %k1
+; CHECK-SKX-NEXT: vmovdqa32 (%rdi), %zmm1
+; CHECK-SKX-NEXT: valignd $3, %zmm1, %zmm0, %zmm1 {%k1}
+; CHECK-SKX-NEXT: vmovaps %zmm1, %zmm0
+; CHECK-SKX-NEXT: retq
+ %a = load <16 x i32>, <16 x i32>* %a.ptr
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 3, i32 4, i32 5, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+ %res = select <16 x i1> %mask, <16 x i32> %c, <16 x i32> %a
+ ret <16 x i32> %res
+}
+
+define <8 x double> @test_align_v8f64_rr(<8 x double> %a, <8 x double> %b) nounwind {
+; CHECK-LABEL: test_align_v8f64_rr:
+; CHECK: ## BB#0:
+; CHECK-NEXT: valignq $3, %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x double> %c
+}
+
+define <8 x double> @test_align_v8f64_rm(<8 x double>* %a.ptr, <8 x double> %b) nounwind {
+; CHECK-LABEL: test_align_v8f64_rm:
+; CHECK: ## BB#0:
+; CHECK-NEXT: valignq $3, (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %a = load <8 x double>, <8 x double>* %a.ptr
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x double> %c
+}
+
+define <8 x double> @test_align_v8f64_rm_mask(<8 x double>* %a.ptr, <8 x double> %b, <8 x i1> %mask) nounwind {
+; CHECK-LABEL: test_align_v8f64_rm_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmovsxwq %xmm1, %zmm1
+; CHECK-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm1, %zmm1
+; CHECK-NEXT: vptestmq %zmm1, %zmm1, %k1
+; CHECK-NEXT: valignq $3, (%rdi), %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+;
+; CHECK-SKX-LABEL: test_align_v8f64_rm_mask:
+; CHECK-SKX: ## BB#0:
+; CHECK-SKX-NEXT: vpmovw2m %xmm1, %k1
+; CHECK-SKX-NEXT: valignq $3, (%rdi), %zmm0, %zmm0 {%k1} {z}
+; CHECK-SKX-NEXT: retq
+ %a = load <8 x double>, <8 x double>* %a.ptr
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ %res = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
+ ret <8 x double> %res
+}
+
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index e1f6276c6ef4f..04028a1da510d 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -116,11 +116,8 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-LABEL: test9:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: YMM1<def> YMM1<kill> ZMM1<def>
-; KNL-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<def>
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<kill>
; KNL-NEXT: retq
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
@@ -130,11 +127,8 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-LABEL: test10:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: YMM1<def> YMM1<kill> ZMM1<def>
-; KNL-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<def>
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<kill>
; KNL-NEXT: retq
; SKX-LABEL: test10:
; SKX: ## BB#0:
@@ -166,7 +160,6 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: AX<def> AX<kill> EAX<kill>
; KNL-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
%res1 = bitcast <16 x i1> %res to i16
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index fd76ed5d0dbdb..9d96c272f3554 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -668,7 +668,7 @@ declare <4 x float> @llvm.x86.avx512.mask.compress.ps.128(<4 x float> %data, <4
; CHECK-LABEL: compr7
; CHECK-NOT: vcompress
-; CHECK: vmovapd
+; CHECK: vmovupd
define void @compr7(i8* %addr, <8 x double> %data) {
call void @llvm.x86.avx512.mask.compress.store.pd.512(i8* %addr, <8 x double> %data, i8 -1)
ret void
@@ -757,7 +757,7 @@ declare <4 x float> @llvm.x86.avx512.mask.expand.ps.128(<4 x float> %data, <4 x
; CHECK-LABEL: expand7
; CHECK-NOT: vexpand
-; CHECK: vmovapd
+; CHECK: vmovupd
define <8 x double> @expand7(i8* %addr, <8 x double> %data) {
%res = call <8 x double> @llvm.x86.avx512.mask.expand.load.pd.512(i8* %addr, <8 x double> %data, i8 -1)
ret <8 x double> %res
@@ -2552,4 +2552,38 @@ define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %
%res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
ret <4 x float> %res
}
-declare <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) \ No newline at end of file
+declare <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x double> @test_sqrt_pd_256(<4 x double> %a0, i8 %mask) {
+ ; CHECK-LABEL: test_sqrt_pd_256
+ ; CHECK: vsqrtpd
+ %res = call <4 x double> @llvm.x86.avx512.mask.sqrt.pd.256(<4 x double> %a0, <4 x double> zeroinitializer, i8 %mask)
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx512.mask.sqrt.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
+
+define <8 x float> @test_sqrt_ps_256(<8 x float> %a0, i8 %mask) {
+ ; CHECK-LABEL: test_sqrt_ps_256
+ ; CHECK: vsqrtps
+ %res = call <8 x float> @llvm.x86.avx512.mask.sqrt.ps.256(<8 x float> %a0, <8 x float> zeroinitializer, i8 %mask)
+ ret <8 x float> %res
+}
+
+declare <8 x float> @llvm.x86.avx512.mask.sqrt.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
+
+define <4 x double> @test_getexp_pd_256(<4 x double> %a0) {
+ ; CHECK-LABEL: test_getexp_pd_256
+ ; CHECK: vgetexppd
+ %res = call <4 x double> @llvm.x86.avx512.mask.getexp.pd.256(<4 x double> %a0, <4 x double> zeroinitializer, i8 -1)
+ ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.getexp.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
+
+define <8 x float> @test_getexp_ps_256(<8 x float> %a0) {
+ ; CHECK-LABEL: test_getexp_ps_256
+ ; CHECK: vgetexpps
+ %res = call <8 x float> @llvm.x86.avx512.mask.getexp.ps.256(<8 x float> %a0, <8 x float> zeroinitializer, i8 -1)
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx512.mask.getexp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone \ No newline at end of file
diff --git a/test/CodeGen/X86/buildvec-insertvec.ll b/test/CodeGen/X86/buildvec-insertvec.ll
index 3fb69a48b3c76..73dbe1f650a12 100644
--- a/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/test/CodeGen/X86/buildvec-insertvec.ll
@@ -1,15 +1,56 @@
-; RUN: llc < %s -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s
define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: # BB#0:
+; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
+; CHECK-NEXT: movl $255, %eax
+; CHECK-NEXT: pinsrd $3, %eax, %xmm0
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: movd %xmm0, (%rdi)
+; CHECK-NEXT: retq
%t0 = fptoui <3 x float> %in to <3 x i8>
%t1 = shufflevector <3 x i8> %t0, <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
%t2 = insertelement <4 x i8> %t1, i8 -1, i32 3
store <4 x i8> %t2, <4 x i8>* %out, align 4
ret void
-; CHECK: foo
-; CHECK: cvttps2dq
-; CHECK-NOT: pextrd
-; CHECK: pinsrd
-; CHECK-NEXT: pshufb
-; CHECK: ret
+}
+
+; Verify that the DAGCombiner doesn't wrongly fold a build_vector into a
+; blend with a zero vector if the build_vector contains negative zero.
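+; (-0.0 and +0.0 differ only in the sign bit, so sourcing that lane from the
+; zero vector would silently turn -0.0 into +0.0.)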
+;
+; TODO: the codegen for function 'test_negative_zero_1' is sub-optimal.
+; Ideally, we should generate a single shuffle blend operation.
+
+define <4 x float> @test_negative_zero_1(<4 x float> %A) {
+; CHECK-LABEL: test_negative_zero_1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1,0]
+; CHECK-NEXT: xorps %xmm2, %xmm2
+; CHECK-NEXT: blendps {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %A, i32 0
+ %1 = insertelement <4 x float> undef, float %0, i32 0
+ %2 = insertelement <4 x float> %1, float -0.0, i32 1
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = insertelement <4 x float> %2, float %3, i32 2
+ %5 = insertelement <4 x float> %4, float 0.0, i32 3
+ ret <4 x float> %5
+}
+
+define <2 x double> @test_negative_zero_2(<2 x double> %A) {
+; CHECK-LABEL: test_negative_zero_2:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movhpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <2 x double> %A, i32 0
+ %1 = insertelement <2 x double> undef, double %0, i32 0
+ %2 = insertelement <2 x double> %1, double -0.0, i32 1
+ ret <2 x double> %2
}
diff --git a/test/CodeGen/X86/critical-anti-dep-breaker.ll b/test/CodeGen/X86/critical-anti-dep-breaker.ll
index 86afc1f245ad2..de5744d3a88f7 100644
--- a/test/CodeGen/X86/critical-anti-dep-breaker.ll
+++ b/test/CodeGen/X86/critical-anti-dep-breaker.ll
@@ -11,8 +11,7 @@
@NullToken = external global i64
; CHECK-LABEL: Part_Create:
-; CHECK-DAG: # kill: RDI<def>
-; CHECK-DAG: movq PartClass@GOTPCREL(%rip), %r10
+; CHECK: movq PartClass@GOTPCREL(%rip), %r10
define i32 @Part_Create(i64* %Anchor, i32 %TypeNum, i32 %F, i32 %Z, i32* %Status, i64* %PartTkn) {
%PartObj = alloca i64*, align 8
%Vchunk = alloca i64, align 8
diff --git a/test/CodeGen/X86/machine-cp.ll b/test/CodeGen/X86/machine-cp.ll
index 0006b6ea7133f..aaed0f0a23dca 100644
--- a/test/CodeGen/X86/machine-cp.ll
+++ b/test/CodeGen/X86/machine-cp.ll
@@ -58,3 +58,58 @@ while.end: ; preds = %while.body, %entry
%t = trunc i64 %a.addr.0.lcssa to i32
ret i32 %t
}
+
+; Check that copy propagation does not kill things like:
+; dst = copy src <-- do not kill that.
+; ... = op1 dst<undef>
+; ... = op2 dst <-- this is used here.
+;
+; CHECK-LABEL: foo:
+; CHECK: psllw $7,
+; CHECK: psllw $7,
+; CHECK-NEXT: pand
+; CHECK-NEXT: pcmpgtb
+; CHECK-NEXT: pand %xmm{{[0-9]+}}, [[SRC:%xmm[0-9]+]]
+; Machine copy propagation used to delete the first copy because the
+; first few uses were <undef>.
+; CHECK-NEXT: movdqa [[SRC]], [[CPY1:%xmm[0-9]+]]
+; CHECK-NEXT: movdqa [[SRC]], [[CPY2:%xmm[0-9]+]]
+; CHECK-NEXT: punpckhbw [[SRC]],
+; Check that CPY1 is not redefined.
+; CHECK-NOT: , [[CPY1]]
+; This is an undef use, so we do not care.
+; CHECK: punpcklwd [[CPY1]],
+; Check that CPY1 is not redefined.
+; CHECK-NOT: , [[CPY1]]
+; CHECK: punpcklbw [[CPY2]], [[CPY2]]
+; CHECK-NEXT: punpckhwd [[CPY2]], [[CPY2]]
+; CHECK-NEXT: pslld $31, [[CPY2]]
+; Check that CPY1 is not redefined.
+; CHECK-NOT: , [[CPY1]]
+; CHECK: punpcklbw [[CPY1]], [[CPY1]]
+; CHECK-NEXT: punpcklwd [[CPY1]], [[CPY1]]
+; CHECK-NEXT: pslld $31, [[CPY1]]
+define <16 x float> @foo(<16 x float> %x) {
+bb:
+ %v3 = icmp slt <16 x i32> undef, zeroinitializer
+ %v14 = zext <16 x i1> %v3 to <16 x i32>
+ %v16 = fcmp olt <16 x float> %x, zeroinitializer
+ %v17 = sext <16 x i1> %v16 to <16 x i32>
+ %v18 = zext <16 x i1> %v16 to <16 x i32>
+ %v19 = xor <16 x i32> %v14, %v18
+ %v20 = or <16 x i32> %v17, undef
+ %v21 = fptosi <16 x float> %x to <16 x i32>
+ %v22 = sitofp <16 x i32> %v21 to <16 x float>
+ %v69 = fcmp ogt <16 x float> %v22, zeroinitializer
+ %v75 = and <16 x i1> %v69, %v3
+ %v77 = bitcast <16 x float> %v22 to <16 x i32>
+ %v79 = sext <16 x i1> %v75 to <16 x i32>
+ %v80 = and <16 x i32> undef, %v79
+ %v81 = xor <16 x i32> %v77, %v80
+ %v82 = and <16 x i32> undef, %v81
+ %v83 = xor <16 x i32> %v19, %v82
+ %v84 = and <16 x i32> %v83, %v20
+ %v85 = xor <16 x i32> %v19, %v84
+ %v86 = bitcast <16 x i32> %v85 to <16 x float>
+ ret <16 x float> %v86
+}
diff --git a/test/CodeGen/X86/pic.ll b/test/CodeGen/X86/pic.ll
index d543deb804d1b..73be234db81c0 100644
--- a/test/CodeGen/X86/pic.ll
+++ b/test/CodeGen/X86/pic.ll
@@ -196,9 +196,11 @@ bb12:
; LINUX-NEXT: .LJTI7_0:
; LINUX: .long .LBB7_2@GOTOFF
; LINUX: .long .LBB7_8@GOTOFF
-; LINUX: .long .LBB7_14@GOTOFF
-; LINUX: .long .LBB7_9@GOTOFF
-; LINUX: .long .LBB7_10@GOTOFF
+; LINUX: .long .LBB7_4@GOTOFF
+; LINUX: .long .LBB7_6@GOTOFF
+; LINUX: .long .LBB7_5@GOTOFF
+; LINUX: .long .LBB7_8@GOTOFF
+; LINUX: .long .LBB7_7@GOTOFF
}
declare void @foo1(...)
diff --git a/test/CodeGen/X86/pr23603.ll b/test/CodeGen/X86/pr23603.ll
new file mode 100644
index 0000000000000..6f856aedb8d58
--- /dev/null
+++ b/test/CodeGen/X86/pr23603.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+
+declare void @free_v()
+
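+; The load from %x carries !invariant.load metadata, so its value may stay
+; in a callee-saved register across the call to @free_v instead of being
+; reloaded afterwards.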
+define void @f(i32* %x, i32 %c32, i32* %y) {
+; CHECK-LABEL: f
+ entry:
+ %v = load i32, i32* %x, !invariant.load !0
+; CHECK: movl (%rdi), %ebx
+; CHECK: free_v
+; CHECK-NOT: movl (%rdi), %ebx
+ call void @free_v()
+ %c = icmp ne i32 %c32, 0
+ br i1 %c, label %left, label %merge
+
+ left:
+ store i32 %v, i32* %y
+ br label %merge
+
+ merge:
+ ret void
+}
+
+!0 = !{}
diff --git a/test/CodeGen/X86/pr23664.ll b/test/CodeGen/X86/pr23664.ll
new file mode 100644
index 0000000000000..a501c0db837e3
--- /dev/null
+++ b/test/CodeGen/X86/pr23664.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
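+; Sign-extending bit 0 of %arg to i2 gives {b,b}; or-ing in 1 yields bits
+; {b,1}, i.e. (%arg << 1) | 1 in the low two bits, hence the addb/orb
+; sequence below.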
+define i2 @f(i32 %arg) {
+ %trunc = trunc i32 %arg to i1
+ %sext = sext i1 %trunc to i2
+ %or = or i2 %sext, 1
+ ret i2 %or
+
+; CHECK-LABEL: f:
+; CHECK: addb %dil, %dil
+; CHECK-NEXT: orb $1, %dil
+; CHECK-NEXT: movb %dil, %al
+; CHECK-NEXT: retq
+}
diff --git a/test/CodeGen/X86/recip-fastmath.ll b/test/CodeGen/X86/recip-fastmath.ll
index fcd077092dab3..7f1521a83bcfd 100644
--- a/test/CodeGen/X86/recip-fastmath.ll
+++ b/test/CodeGen/X86/recip-fastmath.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,use-recip-est | FileCheck %s --check-prefix=RECIP
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,use-recip-est -x86-recip-refinement-steps=2 | FileCheck %s --check-prefix=REFINE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx -recip=divf,vec-divf | FileCheck %s --check-prefix=RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx -recip=divf:2,vec-divf:2 | FileCheck %s --check-prefix=REFINE
; If the target's divss/divps instructions are substantially
; slower than rcpss/rcpps with a Newton-Raphson refinement,
diff --git a/test/CodeGen/X86/sibcall-win64.ll b/test/CodeGen/X86/sibcall-win64.ll
index f7038726f9ca5..204e1f8b050ba 100644
--- a/test/CodeGen/X86/sibcall-win64.ll
+++ b/test/CodeGen/X86/sibcall-win64.ll
@@ -1,7 +1,11 @@
; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
declare x86_64_win64cc void @win64_callee(i32)
+declare x86_64_win64cc void (i32)* @win64_indirect()
+declare x86_64_win64cc void @win64_other(i32)
declare void @sysv_callee(i32)
+declare void (i32)* @sysv_indirect()
+declare void @sysv_other(i32)
define void @sysv_caller(i32 %p1) {
entry:
@@ -40,3 +44,23 @@ define x86_64_win64cc void @win64_matched(i32 %p1) {
; CHECK-LABEL: win64_matched:
; CHECK: jmp win64_callee # TAILCALL
+
+define x86_64_win64cc void @win64_indirect_caller(i32 %p1) {
+ %1 = call x86_64_win64cc void (i32)* @win64_indirect()
+ call x86_64_win64cc void @win64_other(i32 0)
+ tail call x86_64_win64cc void %1(i32 %p1)
+ ret void
+}
+
+; CHECK-LABEL: win64_indirect_caller:
+; CHECK: jmpq *%{{rax|rcx|rdx|r8|r9|r11}} # TAILCALL
+
+define void @sysv_indirect_caller(i32 %p1) {
+ %1 = call void (i32)* @sysv_indirect()
+ call void @sysv_other(i32 0)
+ tail call void %1(i32 %p1)
+ ret void
+}
+
+; CHECK-LABEL: sysv_indirect_caller:
+; CHECK: jmpq *%{{rax|rcx|rdx|rsi|rdi|r8|r9|r11}} # TAILCALL
diff --git a/test/CodeGen/X86/sqrt-fastmath.ll b/test/CodeGen/X86/sqrt-fastmath.ll
index 4c6b521156e04..373fa53c970f8 100644
--- a/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/test/CodeGen/X86/sqrt-fastmath.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,use-sqrt-est | FileCheck %s --check-prefix=ESTIMATE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx -recip=sqrtf,vec-sqrtf | FileCheck %s --check-prefix=ESTIMATE
declare double @__sqrt_finite(double) #0
declare float @__sqrtf_finite(float) #0
diff --git a/test/CodeGen/X86/stack-folding-x86_64.ll b/test/CodeGen/X86/stack-folding-x86_64.ll
new file mode 100644
index 0000000000000..211227916a09b
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-x86_64.ll
@@ -0,0 +1,51 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
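+; The asm clobbers nearly every general purpose register, so the argument is
+; forced onto the stack; the bsf/bsr below should then consume the spill
+; slot directly as a folded memory operand.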
+
+;TODO stack_fold_bsf_i16
+declare i16 @llvm.cttz.i16(i16, i1)
+
+define i32 @stack_fold_bsf_i32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_bsf_i32
+ ;CHECK: bsfl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call i32 @llvm.cttz.i32(i32 %a0, i1 -1)
+ ret i32 %2
+}
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i64 @stack_fold_bsf_i64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_bsf_i64
+ ;CHECK: bsfq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call i64 @llvm.cttz.i64(i64 %a0, i1 -1)
+ ret i64 %2
+}
+declare i64 @llvm.cttz.i64(i64, i1)
+
+;TODO stack_fold_bsr_i16
+declare i16 @llvm.ctlz.i16(i16, i1)
+
+define i32 @stack_fold_bsr_i32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_bsr_i32
+ ;CHECK: bsrl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call i32 @llvm.ctlz.i32(i32 %a0, i1 -1)
+ ret i32 %2
+}
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i64 @stack_fold_bsr_i64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_bsr_i64
+ ;CHECK: bsrq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call i64 @llvm.ctlz.i64(i64 %a0, i1 -1)
+ ret i64 %2
+}
+declare i64 @llvm.ctlz.i64(i64, i1)
diff --git a/test/CodeGen/X86/statepoint-far-call.ll b/test/CodeGen/X86/statepoint-far-call.ll
new file mode 100644
index 0000000000000..cd8dd0f35a204
--- /dev/null
+++ b/test/CodeGen/X86/statepoint-far-call.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s | FileCheck %s
+; Check that statepoints with X64 far-immediate targets
+; are lowered correctly to an indirect call via a scratch register.
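+; The target address (140727162896504) does not fit in the signed 32-bit
+; displacement of a direct call, so it must be materialized with movabsq and
+; called through a register.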
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-win64"
+
+define void @test_far_call() gc "statepoint-example" {
+; CHECK-LABEL: test_far_call
+; CHECK: pushq %rax
+; CHECK: movabsq $140727162896504, %rax
+; CHECK: callq *%rax
+; CHECK: popq %rax
+; CHECK: retq
+
+entry:
+ %safepoint_token = call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* inttoptr (i64 140727162896504 to void ()*), i32 0, i32 0, i32 0, i32 0)
+ ret void
+}
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+
diff --git a/test/CodeGen/X86/switch-or.ll b/test/CodeGen/X86/switch-or.ll
index 6e6b013d9fa89..4642accfff8d9 100644
--- a/test/CodeGen/X86/switch-or.ll
+++ b/test/CodeGen/X86/switch-or.ll
@@ -1,10 +1,11 @@
; RUN: llc -march=x86 -asm-verbose=false < %s | FileCheck %s
; Check that merging switch cases that differ in one bit works.
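+; Cases 4 (0b100) and 6 (0b110) differ only in bit 1, so both are covered by
+; the single test (%variable | 2) == 6.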
+; CHECK-LABEL: test1
; CHECK: orl $2
; CHECK-NEXT: cmpl $6
-define void @foo(i32 %variable) nounwind {
+define void @test1(i32 %variable) nounwind {
entry:
switch i32 %variable, label %if.end [
i32 4, label %if.then
@@ -19,4 +20,22 @@ if.end:
ret void
}
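+; Cases 0 and -2147483648 (0x80000000) differ only in the sign bit, so one
+; test (%variable | 0x80000000) == 0x80000000 covers both.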
+; CHECK-LABEL: test2
+; CHECK: orl $-2147483648
+; CHECK-NEXT: cmpl $-2147483648
+define void @test2(i32 %variable) nounwind {
+entry:
+ switch i32 %variable, label %if.end [
+ i32 0, label %if.then
+ i32 -2147483648, label %if.then
+ ]
+
+if.then:
+ %call = tail call i32 (...) @bar() nounwind
+ ret void
+
+if.end:
+ ret void
+}
+
declare i32 @bar(...) nounwind
diff --git a/test/CodeGen/X86/switch.ll b/test/CodeGen/X86/switch.ll
index 66a739c8470c4..a4dece65479c6 100644
--- a/test/CodeGen/X86/switch.ll
+++ b/test/CodeGen/X86/switch.ll
@@ -534,3 +534,18 @@ return: ret void
; CHECK-NOT: cmpl
; CHECK: cmpl $99
}
+
+
+define void @pr23738(i4 %x) {
+entry:
+ switch i4 %x, label %bb0 [
+ i4 0, label %bb1
+ i4 1, label %bb1
+ i4 -5, label %bb1
+ ]
+bb0: tail call void @g(i32 0) br label %return
+bb1: tail call void @g(i32 1) br label %return
+return: ret void
+; Don't assert due to truncating the bitwidth (64) to i4 when checking
+; that the bit-test range fits in a word.
+}
diff --git a/test/CodeGen/X86/tail-call-got.ll b/test/CodeGen/X86/tail-call-got.ll
index 84d561dcd8c3f..20d1a87b626a3 100644
--- a/test/CodeGen/X86/tail-call-got.ll
+++ b/test/CodeGen/X86/tail-call-got.ll
@@ -1,12 +1,14 @@
; RUN: llc < %s -relocation-model=pic -mattr=+sse2 | FileCheck %s
+; We used to do tail calls through the GOT for these symbols, but that was
+; disabled due to PR15086.
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
target triple = "i386-unknown-freebsd9.0"
define double @test1(double %x) nounwind readnone {
; CHECK-LABEL: test1:
-; CHECK: movl foo@GOT
-; CHECK-NEXT: jmpl
+; CHECK: calll foo@PLT
%1 = tail call double @foo(double %x) nounwind readnone
ret double %1
}
@@ -15,10 +17,18 @@ declare double @foo(double) readnone
define double @test2(double %x) nounwind readnone {
; CHECK-LABEL: test2:
-; CHECK: movl sin@GOT
-; CHECK-NEXT: jmpl
+; CHECK: calll sin@PLT
%1 = tail call double @sin(double %x) nounwind readnone
ret double %1
}
declare double @sin(double) readnone
+
+define double @test3(double %x) nounwind readnone {
+; CHECK-LABEL: test3:
+; CHECK: calll sin2@PLT
+ %1 = tail call double @sin2(double %x) nounwind readnone
+ ret double %1
+}
+
+declare double @sin2(double) readnone
diff --git a/test/CodeGen/X86/tailcallpic1.ll b/test/CodeGen/X86/tailcallpic1.ll
index ff590a1fd3e98..ed101fcccd2db 100644
--- a/test/CodeGen/X86/tailcallpic1.ll
+++ b/test/CodeGen/X86/tailcallpic1.ll
@@ -1,5 +1,8 @@
; RUN: llc < %s -tailcallopt -mtriple=i686-pc-linux-gnu -relocation-model=pic | FileCheck %s
+; This test uses guaranteed TCO, so these will be tail calls despite the
+; early-binding issues.
+
define protected fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
entry:
ret i32 %a3
diff --git a/test/CodeGen/X86/tailcallpic3.ll b/test/CodeGen/X86/tailcallpic3.ll
new file mode 100644
index 0000000000000..edc58052d82f6
--- /dev/null
+++ b/test/CodeGen/X86/tailcallpic3.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu -relocation-model=pic | FileCheck %s
+
+; While many of these could be tail called, we don't do it because it forces
+; early binding.
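+; Hidden and internal definitions always bind locally, so a direct jmp is
+; safe there; default-visibility functions may be interposed at load time,
+; so we emit a normal PLT call instead of binding early through the GOT.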
+
+declare void @external()
+
+define hidden void @tailcallee_hidden() {
+entry:
+ ret void
+}
+
+define void @tailcall_hidden() {
+entry:
+ tail call void @tailcallee_hidden()
+ ret void
+}
+; CHECK: tailcall_hidden:
+; CHECK: jmp tailcallee_hidden
+
+define internal void @tailcallee_internal() {
+entry:
+ ret void
+}
+
+define void @tailcall_internal() {
+entry:
+ tail call void @tailcallee_internal()
+ ret void
+}
+; CHECK: tailcall_internal:
+; CHECK: jmp tailcallee_internal
+
+define default void @tailcallee_default() {
+entry:
+ ret void
+}
+
+define void @tailcall_default() {
+entry:
+ tail call void @tailcallee_default()
+ ret void
+}
+; CHECK: tailcall_default:
+; CHECK: calll tailcallee_default@PLT
+
+define void @tailcallee_default_implicit() {
+entry:
+ ret void
+}
+
+define void @tailcall_default_implicit() {
+entry:
+ tail call void @tailcallee_default_implicit()
+ ret void
+}
+; CHECK: tailcall_default_implicit:
+; CHECK: calll tailcallee_default_implicit@PLT
+
+define void @tailcall_external() {
+ tail call void @external()
+ ret void
+}
+; CHECK: tailcall_external:
+; CHECK: calll external@PLT
+
+define void @musttail_external() {
+ musttail call void @external()
+ ret void
+}
+; CHECK: musttail_external:
+; CHECK: movl external@GOT
+; CHECK: jmpl
diff --git a/test/CodeGen/X86/vec_fp_to_int.ll b/test/CodeGen/X86/vec_fp_to_int.ll
index 9f1c7afa295b5..3e72212d85d3e 100644
--- a/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/test/CodeGen/X86/vec_fp_to_int.ll
@@ -239,7 +239,6 @@ define <4 x i64> @fptoui_4vf64(<4 x double> %a) {
; SSE2: # BB#0:
; SSE2-NEXT: movapd %xmm0, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
-; SSE2-NEXT: {{.*#+}} kill: XMM0<def> XMM2<kill>
; SSE2-NEXT: subsd %xmm3, %xmm0
; SSE2-NEXT: cvttsd2si %xmm0, %rcx
; SSE2-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
@@ -589,7 +588,6 @@ define <8 x i32> @fptoui_8vf32(<8 x float> %a) {
; SSE2-LABEL: fptoui_8vf32:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: {{.*#+}} kill: XMM0<def> XMM2<kill>
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: cvttss2si %xmm0, %rax
; SSE2-NEXT: movd %eax, %xmm0
diff --git a/test/CodeGen/X86/vec_shift8.ll b/test/CodeGen/X86/vec_shift8.ll
new file mode 100644
index 0000000000000..a32cb30b0b262
--- /dev/null
+++ b/test/CodeGen/X86/vec_shift8.ll
@@ -0,0 +1,1016 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
+
+;
+; Vectorized integer shifts
+;
+
+define <2 x i64> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp {
+entry:
+; SSE2: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: pextrw $7, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: pextrw $3, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: pextrw $5, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: pextrw $1, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: pextrw $6, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: pextrw $2, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: pextrw $4, %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: shll %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSE41: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: pextrw $1, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: movd %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %edx
+; SSE41-NEXT: movd %edx, %xmm2
+; SSE41-NEXT: pinsrw $1, %eax, %xmm2
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: pextrw $2, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: pinsrw $2, %eax, %xmm2
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: pextrw $3, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: pinsrw $3, %eax, %xmm2
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: pextrw $4, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: pinsrw $4, %eax, %xmm2
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: pextrw $5, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: pinsrw $5, %eax, %xmm2
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: pextrw $6, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: pinsrw $6, %eax, %xmm2
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: pextrw $7, %xmm1, %ecx
+; SSE41-NEXT: shll %cl, %eax
+; SSE41-NEXT: pinsrw $7, %eax, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: vpextrw $1, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: vmovd %xmm1, %ecx
+; AVX-NEXT: shll %cl, %edx
+; AVX-NEXT: vmovd %edx, %xmm2
+; AVX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $4, %xmm0, %eax
+; AVX-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $6, %xmm0, %eax
+; AVX-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $7, %xmm0, %eax
+; AVX-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX-NEXT: shll %cl, %eax
+; AVX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %shl = shl <8 x i16> %r, %a
+ %tmp2 = bitcast <8 x i16> %shl to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @shl_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
+entry:
+; SSE2: psllw $5, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqb %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqb %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
+; SSE2-NEXT: psllw $2, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: paddb %xmm0, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psllw $5, %xmm1
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm5
+; SSE41-NEXT: paddb %xmm5, %xmm5
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pand %xmm5, %xmm4
+; SSE41-NEXT: pcmpeqb %xmm3, %xmm4
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pcmpeqb %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm6
+; SSE41-NEXT: psllw $4, %xmm6
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm6
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendvb %xmm6, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: psllw $2, %xmm1
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: paddb %xmm1, %xmm1
+; SSE41-NEXT: paddb %xmm5, %xmm5
+; SSE41-NEXT: pand %xmm3, %xmm5
+; SSE41-NEXT: pcmpeqb %xmm5, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX: vpsllw $5, %xmm1, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX-NEXT: vpand %xmm2, %xmm3, %xmm4
+; AVX-NEXT: vpcmpeqb %xmm3, %xmm4, %xmm4
+; AVX-NEXT: vpand %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vpsllw $4, %xmm0, %xmm5
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm5, %xmm5
+; AVX-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
+; AVX-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpand %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shl = shl <16 x i8> %r, %a
+ %tmp2 = bitcast <16 x i8> %shl to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp {
+entry:
+; SSE2: pextrw $7, %xmm1, %ecx
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pextrw $3, %xmm1, %ecx
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $5, %xmm1, %ecx
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: pextrw $1, %xmm1, %ecx
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: pextrw $6, %xmm1, %ecx
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $2, %xmm1, %ecx
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: pextrw $4, %xmm1, %ecx
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: sarw %cl, %ax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSE41: pextrw $1, %xmm1, %ecx
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: movd %xmm1, %ecx
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: sarw %cl, %dx
+; SSE41-NEXT: movd %edx, %xmm2
+; SSE41-NEXT: pinsrw $1, %eax, %xmm2
+; SSE41-NEXT: pextrw $2, %xmm1, %ecx
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: pinsrw $2, %eax, %xmm2
+; SSE41-NEXT: pextrw $3, %xmm1, %ecx
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: pinsrw $3, %eax, %xmm2
+; SSE41-NEXT: pextrw $4, %xmm1, %ecx
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: pinsrw $4, %eax, %xmm2
+; SSE41-NEXT: pextrw $5, %xmm1, %ecx
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: pinsrw $5, %eax, %xmm2
+; SSE41-NEXT: pextrw $6, %xmm1, %ecx
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: pinsrw $6, %eax, %xmm2
+; SSE41-NEXT: pextrw $7, %xmm1, %ecx
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: sarw %cl, %ax
+; SSE41-NEXT: pinsrw $7, %eax, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX: vpextrw $1, %xmm1, %ecx
+; AVX-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vmovd %xmm1, %ecx
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: sarw %cl, %dx
+; AVX-NEXT: vmovd %edx, %xmm2
+; AVX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX-NEXT: vpextrw $4, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX-NEXT: vpextrw $6, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX-NEXT: vpextrw $7, %xmm0, %eax
+; AVX-NEXT: sarw %cl, %ax
+; AVX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %ashr = ashr <8 x i16> %r, %a
+ %tmp2 = bitcast <8 x i16> %ashr to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @ashr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
+entry:
+;
+; SSE2: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm1, -24(%rsp)
+; SSE2-NEXT: movaps %xmm0, -40(%rsp)
+; SSE2-NEXT: movb -9(%rsp), %cl
+; SSE2-NEXT: movb -25(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movb -17(%rsp), %cl
+; SSE2-NEXT: movb -33(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -13(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movl %eax, -44(%rsp)
+; SSE2-NEXT: movb -29(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movzbl %al, %r9d
+; SSE2-NEXT: movb -21(%rsp), %cl
+; SSE2-NEXT: movb -37(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -11(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r10d
+; SSE2-NEXT: movb -27(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -19(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r11d
+; SSE2-NEXT: movb -35(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -15(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r14d
+; SSE2-NEXT: movb -31(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movzbl %al, %r15d
+; SSE2-NEXT: movb -23(%rsp), %cl
+; SSE2-NEXT: movb -39(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -10(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r12d
+; SSE2-NEXT: movb -26(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -18(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r13d
+; SSE2-NEXT: movb -34(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -14(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r8d
+; SSE2-NEXT: movb -30(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -22(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %ebp
+; SSE2-NEXT: movb -38(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movb -12(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %edi
+; SSE2-NEXT: movb -28(%rsp), %dl
+; SSE2-NEXT: sarb %cl, %dl
+; SSE2-NEXT: movb -20(%rsp), %cl
+; SSE2-NEXT: movzbl %dl, %esi
+; SSE2-NEXT: movb -36(%rsp), %bl
+; SSE2-NEXT: sarb %cl, %bl
+; SSE2-NEXT: movb -16(%rsp), %cl
+; SSE2-NEXT: movzbl %bl, %ebx
+; SSE2-NEXT: movb -32(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movzbl %al, %edx
+; SSE2-NEXT: movb -24(%rsp), %cl
+; SSE2-NEXT: movb -40(%rsp), %al
+; SSE2-NEXT: sarb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd -44(%rsp), %xmm1
+; SSE2: movd %r9d, %xmm2
+; SSE2-NEXT: movd %r10d, %xmm3
+; SSE2-NEXT: movd %r11d, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %r14d, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movd %r15d, %xmm1
+; SSE2-NEXT: movd %r12d, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: movd %r13d, %xmm0
+; SSE2-NEXT: movd %r8d, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %ebp, %xmm0
+; SSE2-NEXT: movd %edi, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: movd %esi, %xmm0
+; SSE2-NEXT: movd %ebx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE41: pextrb $1, %xmm1, %ecx
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pextrb $0, %xmm1, %ecx
+; SSE41-NEXT: pextrb $0, %xmm0, %edx
+; SSE41-NEXT: sarb %cl, %dl
+; SSE41-NEXT: movzbl %dl, %ecx
+; SSE41-NEXT: movd %ecx, %xmm2
+; SSE41-NEXT: pinsrb $1, %eax, %xmm2
+; SSE41-NEXT: pextrb $2, %xmm1, %ecx
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm2
+; SSE41-NEXT: pextrb $3, %xmm1, %ecx
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm2
+; SSE41-NEXT: pextrb $4, %xmm1, %ecx
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm2
+; SSE41-NEXT: pextrb $5, %xmm1, %ecx
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm2
+; SSE41-NEXT: pextrb $6, %xmm1, %ecx
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm2
+; SSE41-NEXT: pextrb $7, %xmm1, %ecx
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm2
+; SSE41-NEXT: pextrb $8, %xmm1, %ecx
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm2
+; SSE41-NEXT: pextrb $9, %xmm1, %ecx
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm2
+; SSE41-NEXT: pextrb $10, %xmm1, %ecx
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm2
+; SSE41-NEXT: pextrb $11, %xmm1, %ecx
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm2
+; SSE41-NEXT: pextrb $12, %xmm1, %ecx
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm2
+; SSE41-NEXT: pextrb $13, %xmm1, %ecx
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm2
+; SSE41-NEXT: pextrb $14, %xmm1, %ecx
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm2
+; SSE41-NEXT: pextrb $15, %xmm1, %ecx
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: sarb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX: vpextrb $1, %xmm1, %ecx
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX-NEXT: vpextrb $0, %xmm0, %edx
+; AVX-NEXT: sarb %cl, %dl
+; AVX-NEXT: movzbl %dl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm1, %ecx
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm1, %ecx
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm1, %ecx
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm1, %ecx
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm1, %ecx
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm1, %ecx
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm1, %ecx
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm1, %ecx
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm1, %ecx
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm1, %ecx
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm1, %ecx
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm1, %ecx
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm1, %ecx
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm1, %ecx
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: sarb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %ashr = ashr <16 x i8> %r, %a
+ %tmp2 = bitcast <16 x i8> %ashr to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp {
+entry:
+
+; SSE2: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: pextrw $7, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: pextrw $3, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: pextrw $5, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: pextrw $1, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: pextrw $6, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: pextrw $2, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: pextrw $4, %xmm1, %ecx
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: movzwl %ax, %eax
+; SSE2-NEXT: shrl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSE41: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: pextrw $1, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: movd %xmm1, %ecx
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: movzwl %dx, %edx
+; SSE41-NEXT: shrl %cl, %edx
+; SSE41-NEXT: movd %edx, %xmm2
+; SSE41-NEXT: pinsrw $1, %eax, %xmm2
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: pextrw $2, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: pinsrw $2, %eax, %xmm2
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: pextrw $3, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: pinsrw $3, %eax, %xmm2
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: pextrw $4, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: pinsrw $4, %eax, %xmm2
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: pextrw $5, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: pinsrw $5, %eax, %xmm2
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: pextrw $6, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: pinsrw $6, %eax, %xmm2
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: pextrw $7, %xmm1, %ecx
+; SSE41-NEXT: shrl %cl, %eax
+; SSE41-NEXT: pinsrw $7, %eax, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: vpextrw $1, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vmovd %xmm1, %ecx
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: movzwl %dx, %edx
+; AVX-NEXT: shrl %cl, %edx
+; AVX-NEXT: vmovd %edx, %xmm2
+; AVX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $4, %xmm0, %eax
+; AVX-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $6, %xmm0, %eax
+; AVX-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrw $7, %xmm0, %eax
+; AVX-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX-NEXT: shrl %cl, %eax
+; AVX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %lshr = lshr <8 x i16> %r, %a
+ %tmp2 = bitcast <8 x i16> %lshr to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @lshr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
+entry:
+; SSE2: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm1, -24(%rsp)
+; SSE2-NEXT: movaps %xmm0, -40(%rsp)
+; SSE2-NEXT: movb -9(%rsp), %cl
+; SSE2-NEXT: movb -25(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movb -17(%rsp), %cl
+; SSE2-NEXT: movb -33(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -13(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movl %eax, -44(%rsp)
+; SSE2-NEXT: movb -29(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movzbl %al, %r9d
+; SSE2-NEXT: movb -21(%rsp), %cl
+; SSE2-NEXT: movb -37(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -11(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r10d
+; SSE2-NEXT: movb -27(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -19(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r11d
+; SSE2-NEXT: movb -35(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -15(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r14d
+; SSE2-NEXT: movb -31(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movzbl %al, %r15d
+; SSE2-NEXT: movb -23(%rsp), %cl
+; SSE2-NEXT: movb -39(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -10(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r12d
+; SSE2-NEXT: movb -26(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -18(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r13d
+; SSE2-NEXT: movb -34(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -14(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %r8d
+; SSE2-NEXT: movb -30(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -22(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %ebp
+; SSE2-NEXT: movb -38(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movb -12(%rsp), %cl
+; SSE2-NEXT: movzbl %al, %edi
+; SSE2-NEXT: movb -28(%rsp), %dl
+; SSE2-NEXT: shrb %cl, %dl
+; SSE2-NEXT: movb -20(%rsp), %cl
+; SSE2-NEXT: movzbl %dl, %esi
+; SSE2-NEXT: movb -36(%rsp), %bl
+; SSE2-NEXT: shrb %cl, %bl
+; SSE2-NEXT: movb -16(%rsp), %cl
+; SSE2-NEXT: movzbl %bl, %ebx
+; SSE2-NEXT: movb -32(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movzbl %al, %edx
+; SSE2-NEXT: movb -24(%rsp), %cl
+; SSE2-NEXT: movb -40(%rsp), %al
+; SSE2-NEXT: shrb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd -44(%rsp), %xmm1
+; SSE2: movd %r9d, %xmm2
+; SSE2-NEXT: movd %r10d, %xmm3
+; SSE2-NEXT: movd %r11d, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %r14d, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movd %r15d, %xmm1
+; SSE2-NEXT: movd %r12d, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: movd %r13d, %xmm0
+; SSE2-NEXT: movd %r8d, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %ebp, %xmm0
+; SSE2-NEXT: movd %edi, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: movd %esi, %xmm0
+; SSE2-NEXT: movd %ebx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE41: pextrb $1, %xmm1, %ecx
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pextrb $0, %xmm1, %ecx
+; SSE41-NEXT: pextrb $0, %xmm0, %edx
+; SSE41-NEXT: shrb %cl, %dl
+; SSE41-NEXT: movzbl %dl, %ecx
+; SSE41-NEXT: movd %ecx, %xmm2
+; SSE41-NEXT: pinsrb $1, %eax, %xmm2
+; SSE41-NEXT: pextrb $2, %xmm1, %ecx
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm2
+; SSE41-NEXT: pextrb $3, %xmm1, %ecx
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm2
+; SSE41-NEXT: pextrb $4, %xmm1, %ecx
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm2
+; SSE41-NEXT: pextrb $5, %xmm1, %ecx
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm2
+; SSE41-NEXT: pextrb $6, %xmm1, %ecx
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm2
+; SSE41-NEXT: pextrb $7, %xmm1, %ecx
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm2
+; SSE41-NEXT: pextrb $8, %xmm1, %ecx
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm2
+; SSE41-NEXT: pextrb $9, %xmm1, %ecx
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm2
+; SSE41-NEXT: pextrb $10, %xmm1, %ecx
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm2
+; SSE41-NEXT: pextrb $11, %xmm1, %ecx
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm2
+; SSE41-NEXT: pextrb $12, %xmm1, %ecx
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm2
+; SSE41-NEXT: pextrb $13, %xmm1, %ecx
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm2
+; SSE41-NEXT: pextrb $14, %xmm1, %ecx
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm2
+; SSE41-NEXT: pextrb $15, %xmm1, %ecx
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: shrb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX: vpextrb $1, %xmm1, %ecx
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX-NEXT: vpextrb $0, %xmm0, %edx
+; AVX-NEXT: shrb %cl, %dl
+; AVX-NEXT: movzbl %dl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm1, %ecx
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm1, %ecx
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm1, %ecx
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm1, %ecx
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm1, %ecx
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm1, %ecx
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm1, %ecx
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm1, %ecx
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm1, %ecx
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm1, %ecx
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm1, %ecx
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm1, %ecx
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm1, %ecx
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm1, %ecx
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: shrb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %lshr = lshr <16 x i8> %r, %a
+ %tmp2 = bitcast <16 x i8> %lshr to <2 x i64>
+ ret <2 x i64> %tmp2
+}
diff --git a/test/CodeGen/X86/vector-ctpop.ll b/test/CodeGen/X86/vector-ctpop.ll
deleted file mode 100644
index 59d67928c6fad..0000000000000
--- a/test/CodeGen/X86/vector-ctpop.ll
+++ /dev/null
@@ -1,159 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 | FileCheck -check-prefix=AVX2 %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx -mattr=-popcnt | FileCheck -check-prefix=AVX1-NOPOPCNT %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 -mattr=-popcnt | FileCheck -check-prefix=AVX2-NOPOPCNT %s
-
-; Vector version of:
-; v = v - ((v >> 1) & 0x55555555)
-; v = (v & 0x33333333) + ((v >> 2) & 0x33333333)
-; v = (v + (v >> 4)) & 0xF0F0F0F
-; v = v + (v >> 8)
-; v = v + (v >> 16)
-; v = v + (v >> 32) ; i64 only
-
-define <8 x i32> @test0(<8 x i32> %x) {
-; AVX2-LABEL: @test0
-entry:
-; AVX2: vpsrld $1, %ymm
-; AVX2-NEXT: vpbroadcastd
-; AVX2-NEXT: vpand
-; AVX2-NEXT: vpsubd
-; AVX2-NEXT: vpbroadcastd
-; AVX2-NEXT: vpand
-; AVX2-NEXT: vpsrld $2
-; AVX2-NEXT: vpand
-; AVX2-NEXT: vpaddd
-; AVX2-NEXT: vpsrld $4
-; AVX2-NEXT: vpaddd
-; AVX2-NEXT: vpbroadcastd
-; AVX2-NEXT: vpand
-; AVX2-NEXT: vpsrld $8
-; AVX2-NEXT: vpaddd
-; AVX2-NEXT: vpsrld $16
-; AVX2-NEXT: vpaddd
-; AVX2-NEXT: vpbroadcastd
-; AVX2-NEXT: vpand
- %y = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %x)
- ret <8 x i32> %y
-}
-
-define <4 x i64> @test1(<4 x i64> %x) {
-; AVX2-NOPOPCNT-LABEL: @test1
-entry:
-; AVX2-NOPOPCNT: vpsrlq $1, %ymm
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsubq
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsrlq $2
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpsrlq $4
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsrlq $8
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpsrlq $16
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpsrlq $32
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpbroadcastq
-; AVX2-NOPOPCNT-NEXT: vpand
- %y = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %x)
- ret <4 x i64> %y
-}
-
-define <4 x i32> @test2(<4 x i32> %x) {
-; AVX2-NOPOPCNT-LABEL: @test2
-; AVX1-NOPOPCNT-LABEL: @test2
-entry:
-; AVX2-NOPOPCNT: vpsrld $1, %xmm
-; AVX2-NOPOPCNT-NEXT: vpbroadcastd
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsubd
-; AVX2-NOPOPCNT-NEXT: vpbroadcastd
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsrld $2
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpaddd
-; AVX2-NOPOPCNT-NEXT: vpsrld $4
-; AVX2-NOPOPCNT-NEXT: vpaddd
-; AVX2-NOPOPCNT-NEXT: vpbroadcastd
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsrld $8
-; AVX2-NOPOPCNT-NEXT: vpaddd
-; AVX2-NOPOPCNT-NEXT: vpsrld $16
-; AVX2-NOPOPCNT-NEXT: vpaddd
-; AVX2-NOPOPCNT-NEXT: vpbroadcastd
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT: vpsrld $1, %xmm
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpsubd
-; AVX1-NOPOPCNT-NEXT: vmovdqa
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpsrld $2
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpaddd
-; AVX1-NOPOPCNT-NEXT: vpsrld $4
-; AVX1-NOPOPCNT-NEXT: vpaddd
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpsrld $8
-; AVX1-NOPOPCNT-NEXT: vpaddd
-; AVX1-NOPOPCNT-NEXT: vpsrld $16
-; AVX1-NOPOPCNT-NEXT: vpaddd
-; AVX1-NOPOPCNT-NEXT: vpand
- %y = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %x)
- ret <4 x i32> %y
-}
-
-define <2 x i64> @test3(<2 x i64> %x) {
-; AVX2-NOPOPCNT-LABEL: @test3
-; AVX1-NOPOPCNT-LABEL: @test3
-entry:
-; AVX2-NOPOPCNT: vpsrlq $1, %xmm
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsubq
-; AVX2-NOPOPCNT-NEXT: vmovdqa
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsrlq $2
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpsrlq $4
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX2-NOPOPCNT-NEXT: vpsrlq $8
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpsrlq $16
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpsrlq $32
-; AVX2-NOPOPCNT-NEXT: vpaddq
-; AVX2-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT: vpsrlq $1, %xmm
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpsubq
-; AVX1-NOPOPCNT-NEXT: vmovdqa
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpsrlq $2
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpaddq
-; AVX1-NOPOPCNT-NEXT: vpsrlq $4
-; AVX1-NOPOPCNT-NEXT: vpaddq
-; AVX1-NOPOPCNT-NEXT: vpand
-; AVX1-NOPOPCNT-NEXT: vpsrlq $8
-; AVX1-NOPOPCNT-NEXT: vpaddq
-; AVX1-NOPOPCNT-NEXT: vpsrlq $16
-; AVX1-NOPOPCNT-NEXT: vpaddq
-; AVX1-NOPOPCNT-NEXT: vpsrlq $32
-; AVX1-NOPOPCNT-NEXT: vpaddq
-; AVX1-NOPOPCNT-NEXT: vpand
- %y = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
- ret <2 x i64> %y
-}
-
-declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
-declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
-
-declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
-declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
-
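
Note: the deleted test's header comment above sketches the classic branch-free SWAR popcount that the checked vector lowering mirrors. For reference, a minimal C sketch of the 32-bit form under standard C semantics; the function name popcount32 is illustrative and not part of LLVM or the test:

#include <stdint.h>

/* Classic SWAR popcount; each statement corresponds to one line of the
   comment above (2-bit sums, 4-bit sums, 8-bit sums, then byte folds). */
static uint32_t popcount32(uint32_t v) {
    v = v - ((v >> 1) & 0x55555555);                /* sums of bit pairs   */
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333); /* sums per nibble     */
    v = (v + (v >> 4)) & 0x0F0F0F0F;                /* sums per byte       */
    v = v + (v >> 8);                               /* fold low halfwords  */
    v = v + (v >> 16);                              /* fold low words      */
    return v & 0x3F;                                /* count is in 0..32   */
}

For the 64-bit variant, one extra fold (v = v + (v >> 32), as the comment notes for i64) is applied before the final mask.
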
diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll
new file mode 100644
index 0000000000000..b43188b7c6ea1
--- /dev/null
+++ b/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -0,0 +1,1915 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
+define <2 x i64> @testv2i64(<2 x i64> %in) {
+; SSE2-LABEL: testv2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsrq %rax, %rax
+; SSE2-NEXT: movl $127, %ecx
+; SSE2-NEXT: cmoveq %rcx, %rax
+; SSE2-NEXT: xorq $63, %rax
+; SSE2-NEXT: movd %rax, %xmm1
+; SSE2-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsrq %rax, %rax
+; SSE2-NEXT: cmoveq %rcx, %rax
+; SSE2-NEXT: xorq $63, %rax
+; SSE2-NEXT: movd %rax, %xmm0
+; SSE2-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv2i64:
+; SSE3: # BB#0:
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsrq %rax, %rax
+; SSE3-NEXT: movl $127, %ecx
+; SSE3-NEXT: cmoveq %rcx, %rax
+; SSE3-NEXT: xorq $63, %rax
+; SSE3-NEXT: movd %rax, %xmm1
+; SSE3-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsrq %rax, %rax
+; SSE3-NEXT: cmoveq %rcx, %rax
+; SSE3-NEXT: xorq $63, %rax
+; SSE3-NEXT: movd %rax, %xmm0
+; SSE3-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv2i64:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsrq %rax, %rax
+; SSSE3-NEXT: movl $127, %ecx
+; SSSE3-NEXT: cmoveq %rcx, %rax
+; SSSE3-NEXT: xorq $63, %rax
+; SSSE3-NEXT: movd %rax, %xmm1
+; SSSE3-NEXT: pshufd $78, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsrq %rax, %rax
+; SSSE3-NEXT: cmoveq %rcx, %rax
+; SSSE3-NEXT: xorq $63, %rax
+; SSSE3-NEXT: movd %rax, %xmm0
+; SSSE3-NEXT: punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rax
+; SSE41-NEXT: bsrq %rax, %rax
+; SSE41-NEXT: movl $127, %ecx
+; SSE41-NEXT: cmoveq %rcx, %rax
+; SSE41-NEXT: xorq $63, %rax
+; SSE41-NEXT: movd %rax, %xmm1
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: bsrq %rax, %rax
+; SSE41-NEXT: cmoveq %rcx, %rax
+; SSE41-NEXT: xorq $63, %rax
+; SSE41-NEXT: movd %rax, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: bsrq %rax, %rax
+; AVX-NEXT: movl $127, %ecx
+; AVX-NEXT: cmoveq %rcx, %rax
+; AVX-NEXT: xorq $63, %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: bsrq %rax, %rax
+; AVX-NEXT: cmoveq %rcx, %rax
+; AVX-NEXT: xorq $63, %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 0)
+ ret <2 x i64> %out
+}
+
+define <2 x i64> @testv2i64u(<2 x i64> %in) {
+; SSE2-LABEL: testv2i64u:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsrq %rax, %rax
+; SSE2-NEXT: xorq $63, %rax
+; SSE2-NEXT: movd %rax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsrq %rax, %rax
+; SSE2-NEXT: xorq $63, %rax
+; SSE2-NEXT: movd %rax, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv2i64u:
+; SSE3: # BB#0:
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsrq %rax, %rax
+; SSE3-NEXT: xorq $63, %rax
+; SSE3-NEXT: movd %rax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsrq %rax, %rax
+; SSE3-NEXT: xorq $63, %rax
+; SSE3-NEXT: movd %rax, %xmm0
+; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv2i64u:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsrq %rax, %rax
+; SSSE3-NEXT: xorq $63, %rax
+; SSSE3-NEXT: movd %rax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsrq %rax, %rax
+; SSSE3-NEXT: xorq $63, %rax
+; SSSE3-NEXT: movd %rax, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv2i64u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rax
+; SSE41-NEXT: bsrq %rax, %rax
+; SSE41-NEXT: xorq $63, %rax
+; SSE41-NEXT: movd %rax, %xmm1
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: bsrq %rax, %rax
+; SSE41-NEXT: xorq $63, %rax
+; SSE41-NEXT: movd %rax, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv2i64u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: bsrq %rax, %rax
+; AVX-NEXT: xorq $63, %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: bsrq %rax, %rax
+; AVX-NEXT: xorq $63, %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 -1)
+ ret <2 x i64> %out
+}
+
+define <4 x i32> @testv4i32(<4 x i32> %in) {
+; SSE2-LABEL: testv4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: movl $63, %ecx
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv4i32:
+; SSE3: # BB#0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm1, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: movl $63, %ecx
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE3-NEXT: movd %xmm2, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv4i32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: movl $63, %ecx
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrd $1, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: movl $63, %ecx
+; SSE41-NEXT: cmovel %ecx, %eax
+; SSE41-NEXT: xorl $31, %eax
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: bsrl %edx, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: xorl $31, %edx
+; SSE41-NEXT: movd %edx, %xmm1
+; SSE41-NEXT: pinsrd $1, %eax, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: cmovel %ecx, %eax
+; SSE41-NEXT: xorl $31, %eax
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: cmovel %ecx, %eax
+; SSE41-NEXT: xorl $31, %eax
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: movl $63, %ecx
+; AVX-NEXT: cmovel %ecx, %eax
+; AVX-NEXT: xorl $31, %eax
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: bsrl %edx, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: xorl $31, %edx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $2, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: cmovel %ecx, %eax
+; AVX-NEXT: xorl $31, %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $3, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: cmovel %ecx, %eax
+; AVX-NEXT: xorl $31, %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 0)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @testv4i32u(<4 x i32> %in) {
+; SSE2-LABEL: testv4i32u:
+; SSE2: # BB#0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv4i32u:
+; SSE3: # BB#0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm1, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE3-NEXT: movd %xmm2, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $31, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv4i32u:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $31, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv4i32u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrd $1, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $31, %eax
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: xorl $31, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrd $1, %eax, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $31, %eax
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $31, %eax
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv4i32u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $31, %eax
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: xorl $31, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $2, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $31, %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $3, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $31, %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 -1)
+ ret <4 x i32> %out
+}
+
+define <8 x i16> @testv8i16(<8 x i16> %in) {
+; SSE2-LABEL: testv8i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %cx
+; SSE2-NEXT: movw $31, %ax
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: pextrw $3, %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pextrw $1, %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pextrw $2, %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: bsrw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: xorl $15, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv8i16:
+; SSE3: # BB#0:
+; SSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %cx
+; SSE3-NEXT: movw $31, %ax
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: pextrw $3, %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT: pextrw $5, %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: pextrw $1, %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE3-NEXT: pextrw $6, %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: bsrw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: xorl $15, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv8i16:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %cx
+; SSSE3-NEXT: movw $31, %ax
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: pextrw $3, %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: pextrw $5, %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: pextrw $1, %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: pextrw $6, %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: bsrw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: xorl $15, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv8i16:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %cx
+; SSE41-NEXT: movw $31, %ax
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: bsrw %dx, %dx
+; SSE41-NEXT: cmovew %ax, %dx
+; SSE41-NEXT: xorl $15, %edx
+; SSE41-NEXT: movd %edx, %xmm1
+; SSE41-NEXT: pinsrw $1, %ecx, %xmm1
+; SSE41-NEXT: pextrw $2, %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: pinsrw $2, %ecx, %xmm1
+; SSE41-NEXT: pextrw $3, %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: pinsrw $3, %ecx, %xmm1
+; SSE41-NEXT: pextrw $4, %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: pinsrw $4, %ecx, %xmm1
+; SSE41-NEXT: pextrw $5, %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: pinsrw $5, %ecx, %xmm1
+; SSE41-NEXT: pextrw $6, %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: pinsrw $6, %ecx, %xmm1
+; SSE41-NEXT: pextrw $7, %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: pinsrw $7, %ecx, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %cx
+; AVX-NEXT: movw $31, %ax
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: bsrw %dx, %dx
+; AVX-NEXT: cmovew %ax, %dx
+; AVX-NEXT: xorl $15, %edx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $7, %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 0)
+ ret <8 x i16> %out
+}
+
+define <8 x i16> @testv8i16u(<8 x i16> %in) {
+; SSE2-LABEL: testv8i16u:
+; SSE2: # BB#0:
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsrw %ax, %ax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv8i16u:
+; SSE3: # BB#0:
+; SSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsrw %ax, %ax
+; SSE3-NEXT: xorl $15, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv8i16u:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsrw %ax, %ax
+; SSSE3-NEXT: xorl $15, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv8i16u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: bsrw %cx, %cx
+; SSE41-NEXT: xorl $15, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrw $1, %eax, %xmm1
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: pinsrw $2, %eax, %xmm1
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: pinsrw $3, %eax, %xmm1
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: pinsrw $4, %eax, %xmm1
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: pinsrw $5, %eax, %xmm1
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: pinsrw $6, %eax, %xmm1
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: bsrw %ax, %ax
+; SSE41-NEXT: xorl $15, %eax
+; SSE41-NEXT: pinsrw $7, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv8i16u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: bsrw %cx, %cx
+; AVX-NEXT: xorl $15, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $4, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $6, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $7, %xmm0, %eax
+; AVX-NEXT: bsrw %ax, %ax
+; AVX-NEXT: xorl $15, %eax
+; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 -1)
+ ret <8 x i16> %out
+}
+
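+; Note on the v16i8 expectations below: before SSE4.1 there is no byte extract
+; instruction, so the SSE2/SSE3/SSSE3 paths spill the vector with movaps and
+; reload each lane with movzbl. Each byte then goes through bsrl; bsr leaves its
+; destination undefined when the source is zero, so the zero-defined variant
+; (second ctlz operand 0) guards it with "movl $15" + cmovel, and "xorl $7"
+; turns the bit index into a leading-zero count (15 ^ 7 = 8 = ctlz of a zero byte).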
+define <16 x i8> @testv16i8(<16 x i8> %in) {
+; SSE2-LABEL: testv16i8:
+; SSE2: # BB#0:
+; SSE2: pushq %rbp
+; SSE2: movaps %xmm0, -24(%rsp)
+; SSE2-NEXT: movzbl -9(%rsp), %eax
+; SSE2-NEXT: bsrl %eax, %ecx
+; SSE2-NEXT: movl $15, %eax
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: movzbl -10(%rsp), %ebx
+; SSE2-NEXT: movzbl -11(%rsp), %edi
+; SSE2-NEXT: movzbl -12(%rsp), %r9d
+; SSE2-NEXT: movzbl -13(%rsp), %edx
+; SSE2-NEXT: movzbl -14(%rsp), %r11d
+; SSE2-NEXT: movzbl -15(%rsp), %esi
+; SSE2-NEXT: movzbl -16(%rsp), %r8d
+; SSE2-NEXT: movzbl -17(%rsp), %ecx
+; SSE2-NEXT: bsrl %ecx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: bsrl %edx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: movzbl -18(%rsp), %edx
+; SSE2-NEXT: movzbl -19(%rsp), %ecx
+; SSE2-NEXT: movzbl -20(%rsp), %r10d
+; SSE2-NEXT: movzbl -21(%rsp), %ebp
+; SSE2-NEXT: bsrl %ebp, %ebp
+; SSE2-NEXT: cmovel %eax, %ebp
+; SSE2-NEXT: xorl $7, %ebp
+; SSE2-NEXT: movd %ebp, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: bsrl %edi, %edi
+; SSE2-NEXT: cmovel %eax, %edi
+; SSE2-NEXT: xorl $7, %edi
+; SSE2-NEXT: movd %edi, %xmm1
+; SSE2-NEXT: bsrl %ecx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: bsrl %esi, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: movzbl -22(%rsp), %esi
+; SSE2-NEXT: movzbl -23(%rsp), %ecx
+; SSE2-NEXT: bsrl %ecx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: bsrl %ebx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: bsrl %edx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: bsrl %r11d, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: bsrl %esi, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: bsrl %r9d, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: bsrl %r10d, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: bsrl %r8d, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm4
+; SSE2-NEXT: movzbl -24(%rsp), %ecx
+; SSE2-NEXT: bsrl %ecx, %ecx
+; SSE2-NEXT: cmovel %eax, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv16i8:
+; SSE3: # BB#0:
+; SSE3: pushq %rbp
+; SSE3: movaps %xmm0, -24(%rsp)
+; SSE3-NEXT: movzbl -9(%rsp), %eax
+; SSE3-NEXT: bsrl %eax, %ecx
+; SSE3-NEXT: movl $15, %eax
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: movzbl -10(%rsp), %ebx
+; SSE3-NEXT: movzbl -11(%rsp), %edi
+; SSE3-NEXT: movzbl -12(%rsp), %r9d
+; SSE3-NEXT: movzbl -13(%rsp), %edx
+; SSE3-NEXT: movzbl -14(%rsp), %r11d
+; SSE3-NEXT: movzbl -15(%rsp), %esi
+; SSE3-NEXT: movzbl -16(%rsp), %r8d
+; SSE3-NEXT: movzbl -17(%rsp), %ecx
+; SSE3-NEXT: bsrl %ecx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: bsrl %edx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: movzbl -18(%rsp), %edx
+; SSE3-NEXT: movzbl -19(%rsp), %ecx
+; SSE3-NEXT: movzbl -20(%rsp), %r10d
+; SSE3-NEXT: movzbl -21(%rsp), %ebp
+; SSE3-NEXT: bsrl %ebp, %ebp
+; SSE3-NEXT: cmovel %eax, %ebp
+; SSE3-NEXT: xorl $7, %ebp
+; SSE3-NEXT: movd %ebp, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: bsrl %edi, %edi
+; SSE3-NEXT: cmovel %eax, %edi
+; SSE3-NEXT: xorl $7, %edi
+; SSE3-NEXT: movd %edi, %xmm1
+; SSE3-NEXT: bsrl %ecx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE3-NEXT: bsrl %esi, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: movzbl -22(%rsp), %esi
+; SSE3-NEXT: movzbl -23(%rsp), %ecx
+; SSE3-NEXT: bsrl %ecx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: bsrl %ebx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: bsrl %edx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: bsrl %r11d, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: bsrl %esi, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE3-NEXT: bsrl %r9d, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: bsrl %r10d, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: bsrl %r8d, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm4
+; SSE3-NEXT: movzbl -24(%rsp), %ecx
+; SSE3-NEXT: bsrl %ecx, %ecx
+; SSE3-NEXT: cmovel %eax, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: popq %rbp
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv16i8:
+; SSSE3: # BB#0:
+; SSSE3: pushq %rbp
+; SSSE3: movaps %xmm0, -24(%rsp)
+; SSSE3-NEXT: movzbl -9(%rsp), %eax
+; SSSE3-NEXT: bsrl %eax, %ecx
+; SSSE3-NEXT: movl $15, %eax
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: movzbl -10(%rsp), %ebx
+; SSSE3-NEXT: movzbl -11(%rsp), %edi
+; SSSE3-NEXT: movzbl -12(%rsp), %r9d
+; SSSE3-NEXT: movzbl -13(%rsp), %edx
+; SSSE3-NEXT: movzbl -14(%rsp), %r11d
+; SSSE3-NEXT: movzbl -15(%rsp), %esi
+; SSSE3-NEXT: movzbl -16(%rsp), %r8d
+; SSSE3-NEXT: movzbl -17(%rsp), %ecx
+; SSSE3-NEXT: bsrl %ecx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: bsrl %edx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: movzbl -18(%rsp), %edx
+; SSSE3-NEXT: movzbl -19(%rsp), %ecx
+; SSSE3-NEXT: movzbl -20(%rsp), %r10d
+; SSSE3-NEXT: movzbl -21(%rsp), %ebp
+; SSSE3-NEXT: bsrl %ebp, %ebp
+; SSSE3-NEXT: cmovel %eax, %ebp
+; SSSE3-NEXT: xorl $7, %ebp
+; SSSE3-NEXT: movd %ebp, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: bsrl %edi, %edi
+; SSSE3-NEXT: cmovel %eax, %edi
+; SSSE3-NEXT: xorl $7, %edi
+; SSSE3-NEXT: movd %edi, %xmm1
+; SSSE3-NEXT: bsrl %ecx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSSE3-NEXT: bsrl %esi, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: movzbl -22(%rsp), %esi
+; SSSE3-NEXT: movzbl -23(%rsp), %ecx
+; SSSE3-NEXT: bsrl %ecx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: bsrl %ebx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: bsrl %edx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: bsrl %r11d, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: bsrl %esi, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSSE3-NEXT: bsrl %r9d, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: bsrl %r10d, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: bsrl %r8d, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: movzbl -24(%rsp), %ecx
+; SSSE3-NEXT: bsrl %ecx, %ecx
+; SSSE3-NEXT: cmovel %eax, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: popq %rbx
+; SSSE3-NEXT: popq %rbp
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %ecx
+; SSE41-NEXT: movl $15, %eax
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pextrb $0, %xmm0, %edx
+; SSE41-NEXT: bsrl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: xorl $7, %edx
+; SSE41-NEXT: movd %edx, %xmm1
+; SSE41-NEXT: pinsrb $1, %ecx, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $2, %ecx, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $3, %ecx, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $4, %ecx, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $5, %ecx, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $6, %ecx, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $7, %ecx, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $8, %ecx, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $9, %ecx, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $10, %ecx, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $11, %ecx, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $12, %ecx, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $13, %ecx, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $14, %ecx, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: cmovel %eax, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: pinsrb $15, %ecx, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %ecx
+; AVX-NEXT: movl $15, %eax
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpextrb $0, %xmm0, %edx
+; AVX-NEXT: bsrl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: xorl $7, %edx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $5, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $6, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $8, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: cmovel %eax, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
+ ret <16 x i8> %out
+}
+
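+; Note: in the *u variants the second ctlz operand is true (-1), i.e. a zero
+; input is undef, so the cmov guard above is dropped and each lane is just
+; bsrl + xorl $7.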
+define <16 x i8> @testv16i8u(<16 x i8> %in) {
+; SSE2-LABEL: testv16i8u:
+; SSE2: # BB#0:
+; SSE2: pushq %rbx
+; SSE2: movaps %xmm0, -16(%rsp)
+; SSE2-NEXT: movzbl -1(%rsp), %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl -2(%rsp), %edi
+; SSE2-NEXT: movzbl -3(%rsp), %edx
+; SSE2-NEXT: movzbl -4(%rsp), %r9d
+; SSE2-NEXT: movzbl -5(%rsp), %eax
+; SSE2-NEXT: movzbl -6(%rsp), %r10d
+; SSE2-NEXT: movzbl -7(%rsp), %ecx
+; SSE2-NEXT: movzbl -8(%rsp), %r8d
+; SSE2-NEXT: movzbl -9(%rsp), %esi
+; SSE2-NEXT: bsrl %esi, %esi
+; SSE2-NEXT: xorl $7, %esi
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl -10(%rsp), %eax
+; SSE2-NEXT: movzbl -11(%rsp), %esi
+; SSE2-NEXT: movzbl -12(%rsp), %r11d
+; SSE2-NEXT: movzbl -13(%rsp), %ebx
+; SSE2-NEXT: bsrl %ebx, %ebx
+; SSE2-NEXT: xorl $7, %ebx
+; SSE2-NEXT: movd %ebx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: bsrl %edx, %edx
+; SSE2-NEXT: xorl $7, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: bsrl %esi, %edx
+; SSE2-NEXT: xorl $7, %edx
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: bsrl %ecx, %ecx
+; SSE2-NEXT: xorl $7, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: movzbl -14(%rsp), %ecx
+; SSE2-NEXT: movzbl -15(%rsp), %edx
+; SSE2-NEXT: bsrl %edx, %edx
+; SSE2-NEXT: xorl $7, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: bsrl %edi, %edx
+; SSE2-NEXT: xorl $7, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: bsrl %r10d, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: bsrl %ecx, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: bsrl %r9d, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: bsrl %r11d, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: bsrl %r8d, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movzbl -16(%rsp), %eax
+; SSE2-NEXT: bsrl %eax, %eax
+; SSE2-NEXT: xorl $7, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv16i8u:
+; SSE3: # BB#0:
+; SSE3: pushq %rbx
+; SSE3: movaps %xmm0, -16(%rsp)
+; SSE3-NEXT: movzbl -1(%rsp), %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: movzbl -2(%rsp), %edi
+; SSE3-NEXT: movzbl -3(%rsp), %edx
+; SSE3-NEXT: movzbl -4(%rsp), %r9d
+; SSE3-NEXT: movzbl -5(%rsp), %eax
+; SSE3-NEXT: movzbl -6(%rsp), %r10d
+; SSE3-NEXT: movzbl -7(%rsp), %ecx
+; SSE3-NEXT: movzbl -8(%rsp), %r8d
+; SSE3-NEXT: movzbl -9(%rsp), %esi
+; SSE3-NEXT: bsrl %esi, %esi
+; SSE3-NEXT: xorl $7, %esi
+; SSE3-NEXT: movd %esi, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: movzbl -10(%rsp), %eax
+; SSE3-NEXT: movzbl -11(%rsp), %esi
+; SSE3-NEXT: movzbl -12(%rsp), %r11d
+; SSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSE3-NEXT: bsrl %ebx, %ebx
+; SSE3-NEXT: xorl $7, %ebx
+; SSE3-NEXT: movd %ebx, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE3-NEXT: bsrl %edx, %edx
+; SSE3-NEXT: xorl $7, %edx
+; SSE3-NEXT: movd %edx, %xmm0
+; SSE3-NEXT: bsrl %esi, %edx
+; SSE3-NEXT: xorl $7, %edx
+; SSE3-NEXT: movd %edx, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: bsrl %ecx, %ecx
+; SSE3-NEXT: xorl $7, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: movzbl -14(%rsp), %ecx
+; SSE3-NEXT: movzbl -15(%rsp), %edx
+; SSE3-NEXT: bsrl %edx, %edx
+; SSE3-NEXT: xorl $7, %edx
+; SSE3-NEXT: movd %edx, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE3-NEXT: bsrl %edi, %edx
+; SSE3-NEXT: xorl $7, %edx
+; SSE3-NEXT: movd %edx, %xmm0
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: bsrl %r10d, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: bsrl %ecx, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE3-NEXT: bsrl %r9d, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: bsrl %r11d, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: bsrl %r8d, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm4
+; SSE3-NEXT: movzbl -16(%rsp), %eax
+; SSE3-NEXT: bsrl %eax, %eax
+; SSE3-NEXT: xorl $7, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv16i8u:
+; SSSE3: # BB#0:
+; SSSE3: pushq %rbx
+; SSSE3: movaps %xmm0, -16(%rsp)
+; SSSE3-NEXT: movzbl -1(%rsp), %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzbl -2(%rsp), %edi
+; SSSE3-NEXT: movzbl -3(%rsp), %edx
+; SSSE3-NEXT: movzbl -4(%rsp), %r9d
+; SSSE3-NEXT: movzbl -5(%rsp), %eax
+; SSSE3-NEXT: movzbl -6(%rsp), %r10d
+; SSSE3-NEXT: movzbl -7(%rsp), %ecx
+; SSSE3-NEXT: movzbl -8(%rsp), %r8d
+; SSSE3-NEXT: movzbl -9(%rsp), %esi
+; SSSE3-NEXT: bsrl %esi, %esi
+; SSSE3-NEXT: xorl $7, %esi
+; SSSE3-NEXT: movd %esi, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzbl -10(%rsp), %eax
+; SSSE3-NEXT: movzbl -11(%rsp), %esi
+; SSSE3-NEXT: movzbl -12(%rsp), %r11d
+; SSSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSSE3-NEXT: bsrl %ebx, %ebx
+; SSSE3-NEXT: xorl $7, %ebx
+; SSSE3-NEXT: movd %ebx, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSSE3-NEXT: bsrl %edx, %edx
+; SSSE3-NEXT: xorl $7, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: bsrl %esi, %edx
+; SSSE3-NEXT: xorl $7, %edx
+; SSSE3-NEXT: movd %edx, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: bsrl %ecx, %ecx
+; SSSE3-NEXT: xorl $7, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: movzbl -14(%rsp), %ecx
+; SSSE3-NEXT: movzbl -15(%rsp), %edx
+; SSSE3-NEXT: bsrl %edx, %edx
+; SSSE3-NEXT: xorl $7, %edx
+; SSSE3-NEXT: movd %edx, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: bsrl %edi, %edx
+; SSSE3-NEXT: xorl $7, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: bsrl %r10d, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: bsrl %ecx, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT: bsrl %r9d, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: bsrl %r11d, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: bsrl %r8d, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm4
+; SSSE3-NEXT: movzbl -16(%rsp), %eax
+; SSSE3-NEXT: bsrl %eax, %eax
+; SSSE3-NEXT: xorl $7, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: popq %rbx
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv16i8u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pextrb $0, %xmm0, %ecx
+; SSE41-NEXT: bsrl %ecx, %ecx
+; SSE41-NEXT: xorl $7, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrb $1, %eax, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: bsrl %eax, %eax
+; SSE41-NEXT: xorl $7, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv16i8u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: bsrl %ecx, %ecx
+; AVX-NEXT: xorl $7, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: bsrl %eax, %eax
+; AVX-NEXT: xorl $7, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
+ ret <16 x i8> %out
+}
+
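+; Note: the fold* tests below check that ctlz of constant vectors is folded at
+; compile time. For <i64 256, i64 -1> the result is <i64 55, i64 0> (256 = 1 << 8,
+; so 64 - 1 - 8 = 55 leading zeros); since the upper lane is zero, it can be
+; materialized with a single "movl $55" plus movd/vmovq.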
+define <2 x i64> @foldv2i64() {
+; SSE-LABEL: foldv2i64:
+; SSE: # BB#0:
+; SSE-NEXT: movl $55, %eax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: movl $55, %eax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
+ ret <2 x i64> %out
+}
+
+define <2 x i64> @foldv2i64u() {
+; SSE-LABEL: foldv2i64u:
+; SSE: # BB#0:
+; SSE-NEXT: movl $55, %eax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv2i64u:
+; AVX: # BB#0:
+; AVX-NEXT: movl $55, %eax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
+ ret <2 x i64> %out
+}
+
+define <4 x i32> @foldv4i32() {
+; SSE-LABEL: foldv4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @foldv4i32u() {
+; SSE-LABEL: foldv4i32u:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv4i32u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
+ ret <4 x i32> %out
+}
+
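+; Note: the out-of-range constant i16 -65536 wraps to 0 in an i16 lane, so that
+; element folds to ctlz(0) = 16, matching the fifth element of the expected
+; vector below.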
+define <8 x i16> @foldv8i16() {
+; SSE-LABEL: foldv8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
+ ret <8 x i16> %out
+}
+
+define <8 x i16> @foldv8i16u() {
+; SSE-LABEL: foldv8i16u:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv8i16u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
+ ret <8 x i16> %out
+}
+
+define <16 x i8> @foldv16i8() {
+; SSE-LABEL: foldv16i8:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
+ ret <16 x i8> %out
+}
+
+define <16 x i8> @foldv16i8u() {
+; SSE-LABEL: foldv16i8u:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv16i8u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
+ ret <16 x i8> %out
+}
+
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
diff --git a/test/CodeGen/X86/vector-lzcnt-256.ll b/test/CodeGen/X86/vector-lzcnt-256.ll
new file mode 100644
index 0000000000000..48abe1290528d
--- /dev/null
+++ b/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -0,0 +1,1305 @@
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
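+; Note: without native vector lzcnt support, 256-bit ctlz is handled per 128-bit
+; half: the high half is split off with vextractf128/vextracti128, each lane is
+; scalarized through bsr exactly as in the 128-bit tests, and the halves are
+; rejoined with vinsertf128/vinserti128.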
+define <4 x i64> @testv4i64(<4 x i64> %in) {
+; AVX1-LABEL: testv4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: movl $127, %ecx
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: movl $127, %ecx
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 0)
+ ret <4 x i64> %out
+}
+
+define <4 x i64> @testv4i64u(<4 x i64> %in) {
+; AVX1-LABEL: testv4i64u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: bsrq %rax, %rax
+; AVX1-NEXT: xorq $63, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i64u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: bsrq %rax, %rax
+; AVX2-NEXT: xorq $63, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 -1)
+ ret <4 x i64> %out
+}
+
+define <8 x i32> @testv8i32(<8 x i32> %in) {
+; AVX1-LABEL: testv8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %ecx
+; AVX1-NEXT: movl $63, %eax
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vmovd %xmm1, %edx
+; AVX1-NEXT: bsrl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: xorl $31, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vmovd %xmm0, %edx
+; AVX1-NEXT: bsrl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: xorl $31, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %ecx
+; AVX2-NEXT: movl $63, %eax
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: bsrl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: xorl $31, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: bsrl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: xorl $31, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 0)
+ ret <8 x i32> %out
+}
+
+define <8 x i32> @testv8i32u(<8 x i32> %in) {
+; AVX1-LABEL: testv8i32u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $31, %eax
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $31, %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $31, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $31, %eax
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: xorl $31, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $31, %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $31, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i32u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $31, %eax
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $31, %eax
+; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $31, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $31, %eax
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: xorl $31, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $31, %eax
+; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $31, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 -1)
+ ret <8 x i32> %out
+}
+
+define <16 x i16> @testv16i16(<16 x i16> %in) {
+; AVX1-LABEL: testv16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %cx
+; AVX1-NEXT: movw $31, %ax
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vmovd %xmm1, %edx
+; AVX1-NEXT: bsrw %dx, %dx
+; AVX1-NEXT: cmovew %ax, %dx
+; AVX1-NEXT: xorl $15, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vmovd %xmm0, %edx
+; AVX1-NEXT: bsrw %dx, %dx
+; AVX1-NEXT: cmovew %ax, %dx
+; AVX1-NEXT: xorl $15, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %cx
+; AVX2-NEXT: movw $31, %ax
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: bsrw %dx, %dx
+; AVX2-NEXT: cmovew %ax, %dx
+; AVX2-NEXT: xorl $15, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: bsrw %dx, %dx
+; AVX2-NEXT: cmovew %ax, %dx
+; AVX2-NEXT: xorl $15, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 0)
+ ret <16 x i16> %out
+}
+
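+; The 'u' variants pass i1 -1, making the result undefined for zero
+; elements, so the CMOVE guard should drop out and each lane reduce to a
+; bare BSR plus an XOR with the bit width minus one.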
+define <16 x i16> @testv16i16u(<16 x i16> %in) {
+; AVX1-LABEL: testv16i16u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm1, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: bsrw %cx, %cx
+; AVX1-NEXT: xorl $15, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm0, %eax
+; AVX1-NEXT: bsrw %ax, %ax
+; AVX1-NEXT: xorl $15, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i16u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm1, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: bsrw %cx, %cx
+; AVX2-NEXT: xorl $15, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm0, %eax
+; AVX2-NEXT: bsrw %ax, %ax
+; AVX2-NEXT: xorl $15, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 -1)
+ ret <16 x i16> %out
+}
+
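+; The same pattern applies per byte below: the zero-defined form uses the
+; sentinel 15, since 15 ^ 7 == 8, the correct ctlz of a zero i8.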
+define <32 x i8> @testv32i8(<32 x i8> %in) {
+; AVX1-LABEL: testv32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %ecx
+; AVX1-NEXT: movl $15, %eax
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpextrb $0, %xmm1, %edx
+; AVX1-NEXT: bsrl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: xorl $7, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpextrb $0, %xmm0, %edx
+; AVX1-NEXT: bsrl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: xorl $7, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %ecx
+; AVX2-NEXT: movl $15, %eax
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpextrb $0, %xmm1, %edx
+; AVX2-NEXT: bsrl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: xorl $7, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpextrb $0, %xmm0, %edx
+; AVX2-NEXT: bsrl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: xorl $7, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $5, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vpinsrb $15, %ecx, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
+ ret <32 x i8> %out
+}
+
+define <32 x i8> @testv32i8u(<32 x i8> %in) {
+; AVX1-LABEL: testv32i8u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: bsrl %ecx, %ecx
+; AVX1-NEXT: xorl $7, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: bsrl %eax, %eax
+; AVX1-NEXT: xorl $7, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv32i8u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: bsrl %ecx, %ecx
+; AVX2-NEXT: xorl $7, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: bsrl %eax, %eax
+; AVX2-NEXT: xorl $7, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
+ ret <32 x i8> %out
+}
+
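+; The fold tests below call ctlz on constant vectors; the calls should be
+; constant folded so that each function compiles to a single load of the
+; precomputed result.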
+define <4 x i64> @foldv4i64() {
+; AVX-LABEL: foldv4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; AVX-NEXT: retq
+ %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
+ ret <4 x i64> %out
+}
+
+define <4 x i64> @foldv4i64u() {
+; AVX-LABEL: foldv4i64u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; AVX-NEXT: retq
+ %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
+ ret <4 x i64> %out
+}
+
+define <8 x i32> @foldv8i32() {
+; AVX-LABEL: foldv8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX-NEXT: retq
+ %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
+ ret <8 x i32> %out
+}
+
+define <8 x i32> @foldv8i32u() {
+; AVX-LABEL: foldv8i32u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX-NEXT: retq
+ %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
+ ret <8 x i32> %out
+}
+
+define <16 x i16> @foldv16i16() {
+; AVX-LABEL: foldv16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX-NEXT: retq
+ %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
+ ret <16 x i16> %out
+}
+
+define <16 x i16> @foldv16i16u() {
+; AVX-LABEL: foldv16i16u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX-NEXT: retq
+ %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
+ ret <16 x i16> %out
+}
+
+define <32 x i8> @foldv32i8() {
+; AVX-LABEL: foldv32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX-NEXT: retq
+ %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
+ ret <32 x i8> %out
+}
+
+define <32 x i8> @foldv32i8u() {
+; AVX-LABEL: foldv32i8u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX-NEXT: retq
+ %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
+ ret <32 x i8> %out
+}
+
+declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1)
+declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
+declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
+declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1)
diff --git a/test/CodeGen/X86/vector-popcnt-128.ll b/test/CodeGen/X86/vector-popcnt-128.ll
new file mode 100644
index 0000000000000..fef445de04ab8
--- /dev/null
+++ b/test/CodeGen/X86/vector-popcnt-128.ll
@@ -0,0 +1,462 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
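+; Expected ctpop lowerings: SSE2/SSE3 should use the classic shift-and-mask
+; bit-counting sequence, while SSSE3 and later can use PSHUFB as a 16-entry
+; per-nibble popcount lookup; element widths above i8 then need a horizontal
+; sum of the per-byte counts (PSADBW for the wider types).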
+define <2 x i64> @testv2i64(<2 x i64> %in) {
+; SSE2-LABEL: testv2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: psrlq $2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlq $4, %xmm1
+; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv2i64:
+; SSE3: # BB#0:
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlq $1, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: psubq %xmm1, %xmm0
+; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323]
+; SSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE3-NEXT: pand %xmm1, %xmm2
+; SSE3-NEXT: psrlq $2, %xmm0
+; SSE3-NEXT: pand %xmm1, %xmm0
+; SSE3-NEXT: paddq %xmm2, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlq $4, %xmm1
+; SSE3-NEXT: paddq %xmm0, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: pxor %xmm0, %xmm0
+; SSE3-NEXT: psadbw %xmm0, %xmm1
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv2i64:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pshufb %xmm2, %xmm4
+; SSSE3-NEXT: psrlw $4, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufb %xmm0, %xmm3
+; SSSE3-NEXT: paddb %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: psadbw %xmm3, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pand %xmm1, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pshufb %xmm2, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: psadbw %xmm3, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %in)
+ ret <2 x i64> %out
+}
+
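+; In the SSE2/SSE3 sequence above, 3689348814741910323 is
+; 0x3333333333333333; the 0x55... and 0x0F... masks are the {{.*}}(%rip)
+; constant-pool loads. PSADBW against zero then sums the per-byte counts
+; into each 64-bit lane, which is directly the v2i64 result.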
+define <4 x i32> @testv4i32(<4 x i32> %in) {
+; SSE2-LABEL: testv4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubd %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,858993459,858993459]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: psrld $2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $4, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: psadbw %xmm0, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv4i32:
+; SSE3: # BB#0:
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrld $1, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: psubd %xmm1, %xmm0
+; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,858993459,858993459]
+; SSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE3-NEXT: pand %xmm1, %xmm2
+; SSE3-NEXT: psrld $2, %xmm0
+; SSE3-NEXT: pand %xmm1, %xmm0
+; SSE3-NEXT: paddd %xmm2, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrld $4, %xmm1
+; SSE3-NEXT: paddd %xmm0, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: pxor %xmm0, %xmm0
+; SSE3-NEXT: movdqa %xmm1, %xmm2
+; SSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE3-NEXT: psadbw %xmm0, %xmm2
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT: psadbw %xmm0, %xmm1
+; SSE3-NEXT: packuswb %xmm2, %xmm1
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv4i32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: pshufb %xmm3, %xmm4
+; SSSE3-NEXT: psrlw $4, %xmm0
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufb %xmm0, %xmm1
+; SSSE3-NEXT: paddb %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSSE3-NEXT: psadbw %xmm0, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: psadbw %xmm0, %xmm1
+; SSSE3-NEXT: packuswb %xmm2, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: pshufb %xmm3, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm1
+; SSE41-NEXT: paddb %xmm4, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE41-NEXT: psadbw %xmm0, %xmm2
+; SSE41-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE41-NEXT: psadbw %xmm0, %xmm1
+; SSE41-NEXT: packuswb %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-NEXT: vpsadbw %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %in)
+ ret <4 x i32> %out
+}
+
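+; PSADBW only produces 64-bit sums, so the v4i32 expansion unpacks the byte
+; counts against zero into high and low dword pairs, sums each half, and
+; repacks the two partial results with PACKUSWB.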
+define <8 x i16> @testv8i16(<8 x i16> %in) {
+; SSE2-LABEL: testv8i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psllw $8, %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv8i16:
+; SSE3: # BB#0:
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $1, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: psubw %xmm1, %xmm0
+; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE3-NEXT: pand %xmm1, %xmm2
+; SSE3-NEXT: psrlw $2, %xmm0
+; SSE3-NEXT: pand %xmm1, %xmm0
+; SSE3-NEXT: paddw %xmm2, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $4, %xmm1
+; SSE3-NEXT: paddw %xmm0, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: psllw $8, %xmm0
+; SSE3-NEXT: paddb %xmm1, %xmm0
+; SSE3-NEXT: psrlw $8, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv8i16:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pshufb %xmm2, %xmm4
+; SSSE3-NEXT: psrlw $4, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufb %xmm0, %xmm3
+; SSSE3-NEXT: paddb %xmm4, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: psllw $8, %xmm0
+; SSSE3-NEXT: paddb %xmm3, %xmm0
+; SSSE3-NEXT: psrlw $8, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv8i16:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pand %xmm1, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pshufb %xmm2, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psllw $8, %xmm0
+; SSE41-NEXT: paddb %xmm3, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
+ ret <8 x i16> %out
+}
+
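+; For v8i16 the two byte counts inside each word are combined with
+; PSLLW $8 + PADDB, and PSRLW $8 then moves the 16-bit total into place.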
+define <16 x i8> @testv16i8(<16 x i8> %in) {
+; SSE2-LABEL: testv16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubb %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddb %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv16i8:
+; SSE3: # BB#0:
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $1, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: psubb %xmm1, %xmm0
+; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; SSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE3-NEXT: pand %xmm1, %xmm2
+; SSE3-NEXT: psrlw $2, %xmm0
+; SSE3-NEXT: pand %xmm1, %xmm0
+; SSE3-NEXT: paddb %xmm2, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $4, %xmm1
+; SSE3-NEXT: paddb %xmm0, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv16i8:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: pshufb %xmm3, %xmm4
+; SSSE3-NEXT: psrlw $4, %xmm0
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufb %xmm0, %xmm1
+; SSSE3-NEXT: paddb %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: pshufb %xmm3, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm1
+; SSE41-NEXT: paddb %xmm4, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %in)
+ ret <16 x i8> %out
+}
+
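+; The v16i8 case needs no horizontal reduction at all: the per-byte count
+; from the mask sequence (or the nibble lookup) is already the final result.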
+define <2 x i64> @foldv2i64() {
+; SSE-LABEL: foldv2i64:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,64]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,64]
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> <i64 256, i64 -1>)
+ ret <2 x i64> %out
+}
+
+define <4 x i32> @foldv4i32() {
+; SSE-LABEL: foldv4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,32,0,8]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,32,0,8]
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>)
+ ret <4 x i32> %out
+}
+
+define <8 x i16> @foldv8i16() {
+; SSE-LABEL: foldv8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,16,0,8,0,3,2,3]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,16,0,8,0,3,2,3]
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>)
+ ret <8 x i16> %out
+}
+
+define <16 x i8> @foldv16i8() {
+; SSE-LABEL: foldv16i8:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1]
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>)
+ ret <16 x i8> %out
+}
+
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
diff --git a/test/CodeGen/X86/vector-popcnt-256.ll b/test/CodeGen/X86/vector-popcnt-256.ll
new file mode 100644
index 0000000000000..7ce4f712483a2
--- /dev/null
+++ b/test/CodeGen/X86/vector-popcnt-256.ll
@@ -0,0 +1,220 @@
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
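+; AVX1 has no 256-bit integer operations, so these tests expect the vector
+; to be split into two 128-bit halves, each counted with the PSHUFB nibble
+; lookup, and recombined with VINSERTF128; AVX2 should do the same work with
+; single 256-bit instructions and a doubled 32-byte lookup table.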
+
+define <4 x i64> @testv4i64(<4 x i64> %in) {
+; AVX1-LABEL: testv4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsadbw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm5
+; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpaddb %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsadbw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsadbw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+ %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %in)
+ ret <4 x i64> %out
+}
+
+define <8 x i32> @testv8i32(<8 x i32> %in) {
+; AVX1-LABEL: testv8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpsadbw %xmm5, %xmm3, %xmm5
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; AVX1-NEXT: vpsadbw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm5
+; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpaddb %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT: vpsadbw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; AVX1-NEXT: vpsadbw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX2-NEXT: vpsadbw %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX2-NEXT: vpsadbw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %in)
+ ret <8 x i32> %out
+}
+
+define <16 x i16> @testv16i16(<16 x i16> %in) {
+; AVX1-LABEL: testv16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsllw $8, %xmm2, %xmm4
+; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm4
+; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $8, %ymm0, %ymm1
+; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
+ ret <16 x i16> %out
+}
+
+define <32 x i8> @testv32i8(<32 x i8> %in) {
+; AVX1-LABEL: testv32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
+ ret <32 x i8> %out
+}
+
+define <4 x i64> @foldv4i64() {
+; AVX-LABEL: foldv4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,64,0,8]
+; AVX-NEXT: retq
+ %out = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>)
+ ret <4 x i64> %out
+}
+
+define <8 x i32> @foldv8i32() {
+; AVX-LABEL: foldv8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,32,0,8,16,3,2,3]
+; AVX-NEXT: retq
+ %out = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>)
+ ret <8 x i32> %out
+}
+
+define <16 x i16> @foldv16i16() {
+; AVX-LABEL: foldv16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,16,0,8,0,3,2,3,15,7,1,1,1,1,1,1]
+; AVX-NEXT: retq
+ %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>)
+ ret <16 x i16> %out
+}
+
+define <32 x i8> @foldv32i8() {
+; AVX-LABEL: foldv32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,8,0,8,0,3,2,3,7,7,1,1,1,1,1,1,1,1,0,0,1,2,3,4,5,6,7,8,2,2,3,7]
+; AVX-NEXT: retq
+ %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>)
+ ret <32 x i8> %out
+}
+
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
+declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
+declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
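
The CHECK lines above all encode the same in-register popcount lowering: each byte's low and high nibbles index a 16-entry table of 4-bit popcounts via vpshufb, the two lookups are summed with vpaddb, and wider elements are then accumulated with vpsadbw against zero (i32/i64) or a shift-and-add (i16). A minimal sketch of the byte-level step, written as C with AVX2 intrinsics purely for illustration (the function name is ours, not part of the patch):

#include <immintrin.h>

/* Nibble-LUT popcount per byte, mirroring the vpand/vpsrlw/vpshufb/vpaddb
 * sequence checked above. */
static __m256i popcnt_epi8_avx2(__m256i v) {
  const __m256i lut  = _mm256_setr_epi8(0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,
                                        0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4);
  const __m256i mask = _mm256_set1_epi8(0x0F);
  __m256i lo = _mm256_and_si256(v, mask);                       /* vpand        */
  __m256i hi = _mm256_and_si256(_mm256_srli_epi16(v, 4), mask); /* vpsrlw+vpand */
  return _mm256_add_epi8(_mm256_shuffle_epi8(lut, lo),          /* vpshufb x2   */
                         _mm256_shuffle_epi8(lut, hi));         /* vpaddb       */
}

The foldv* functions check constant folding: ctpop of a constant vector collapses to a single vmovaps of the precomputed result. Note the out-of-range literals (i8 256, i16 -65536): the IR parser truncates integer constants to the element width, which is why the first and fifth lanes of foldv32i8 fold to 0.
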
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 1b42a637907c7..944ec4b8d3ac7 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -843,7 +843,6 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; ALL-LABEL: insert_reg_and_zero_v4f64:
; ALL: # BB#0:
-; ALL-NEXT: # kill: XMM0<def> XMM0<kill> YMM0<def>
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 62d4af7809b6b..8dc76231856a2 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -15,9 +15,8 @@ define <8 x double> @shuffle_v8f64_00000000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00000010(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00000010:
; ALL: # BB#0:
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0>
ret <8 x double> %shuffle
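
The new pattern above is the one repeated throughout this file's hunks: instead of splitting a 512-bit shuffle into 256-bit broadcast/permute/insert chains, the lowering loads an 8-element index vector from the constant pool (vmovdqa64) and issues a single lane-crossing vpermpd. A hedged sketch of the one-source form in C with AVX-512 intrinsics (names and intrinsic spelling are ours, not the patch's):

#include <immintrin.h>

/* Single variable permute for the <0,0,0,0,0,0,1,0> mask checked above. */
static __m512d shuffle_v8f64_00000010_sketch(__m512d a) {
  const __m512i idx = _mm512_setr_epi64(0, 0, 0, 0, 0, 0, 1, 0); /* from .rodata */
  return _mm512_permutexvar_pd(idx, a);                          /* vpermpd      */
}

Where the same sub-pattern repeats in every lane, later hunks use the immediate encodings instead (vpermpd $imm per 256-bit half, vpermilpd $imm with one selector bit per element), which avoid the constant-pool load entirely.
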
@@ -26,9 +25,8 @@ define <8 x double> @shuffle_v8f64_00000010(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00000200(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00000200:
; ALL: # BB#0:
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0>
ret <8 x double> %shuffle
@@ -37,9 +35,8 @@ define <8 x double> @shuffle_v8f64_00000200(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00003000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00003000:
; ALL: # BB#0:
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0>
ret <8 x double> %shuffle
@@ -48,11 +45,8 @@ define <8 x double> @shuffle_v8f64_00003000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00040000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00040000:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vbroadcastsd %xmm1, %ymm1
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0>
ret <8 x double> %shuffle
@@ -61,11 +55,8 @@ define <8 x double> @shuffle_v8f64_00040000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00500000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00500000:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,0]
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <8 x double> %shuffle
@@ -74,11 +65,8 @@ define <8 x double> @shuffle_v8f64_00500000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_06000000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_06000000:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,0]
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <8 x double> %shuffle
@@ -87,11 +75,11 @@ define <8 x double> @shuffle_v8f64_06000000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_70000000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_70000000:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,0,0,0]
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT: movl $7, %eax
+; ALL-NEXT: vpinsrq $0, %rax, %xmm1, %xmm2
+; ALL-NEXT: vinserti32x4 $0, %xmm2, %zmm1, %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <8 x double> %shuffle
@@ -100,10 +88,7 @@ define <8 x double> @shuffle_v8f64_70000000(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01014545(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_01014545:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermpd $68, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
ret <8 x double> %shuffle
@@ -112,9 +97,8 @@ define <8 x double> @shuffle_v8f64_01014545(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00112233(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00112233:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,0,1,1]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
ret <8 x double> %shuffle
@@ -123,9 +107,8 @@ define <8 x double> @shuffle_v8f64_00112233(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00001111(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00001111:
; ALL: # BB#0:
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1>
ret <8 x double> %shuffle
@@ -134,11 +117,7 @@ define <8 x double> @shuffle_v8f64_00001111(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_81a3c5e7(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_81a3c5e7:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vshufpd $170, %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
ret <8 x double> %shuffle
@@ -147,10 +126,9 @@ define <8 x double> @shuffle_v8f64_81a3c5e7(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08080808(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_08080808:
; ALL: # BB#0:
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; ALL-NEXT: vbroadcastsd %xmm1, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
ret <8 x double> %shuffle
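
When the mask draws from both sources, as in shuffle_v8f64_08080808 above, the same idea uses the two-source permute vpermt2pd: the constant index is loaded into zmm2, the permute writes its result over that register, and the trailing vmovaps copies it back to zmm0. A sketch under the same assumptions as before:

#include <immintrin.h>

/* Two-source permute: index values 0-7 select from a, 8-15 from b, so
 * <0,8,0,8,...> interleaves lane 0 of each input. The hardware op
 * clobbers one of its register inputs, hence the vmovaps copy above. */
static __m512d shuffle_v8f64_08080808_sketch(__m512d a, __m512d b) {
  const __m512i idx = _mm512_setr_epi64(0, 8, 0, 8, 0, 8, 0, 8);
  return _mm512_permutex2var_pd(a, idx, b);  /* vpermt2pd */
}
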
@@ -159,15 +137,9 @@ define <8 x double> @shuffle_v8f64_08080808(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08084c4c(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_08084c4c:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm2, %ymm2
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vbroadcastsd %xmm3, %ymm3
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3]
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; ALL-NEXT: vbroadcastsd %xmm1, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 4, i32 12, i32 4, i32 12>
ret <8 x double> %shuffle
@@ -176,13 +148,9 @@ define <8 x double> @shuffle_v8f64_08084c4c(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_8823cc67(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_8823cc67:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vbroadcastsd %xmm3, %ymm3
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; ALL-NEXT: vbroadcastsd %xmm1, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 8, i32 8, i32 2, i32 3, i32 12, i32 12, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -191,13 +159,9 @@ define <8 x double> @shuffle_v8f64_8823cc67(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_9832dc76(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_9832dc76:
; ALL: # BB#0:
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2,3]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[1,0,3,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
ret <8 x double> %shuffle
@@ -206,13 +170,9 @@ define <8 x double> @shuffle_v8f64_9832dc76(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_9810dc54(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_9810dc54:
; ALL: # BB#0:
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm2
-; ALL-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[1,0,3,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm1
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
ret <8 x double> %shuffle
@@ -221,15 +181,9 @@ define <8 x double> @shuffle_v8f64_9810dc54(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08194c5d(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_08194c5d:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm3
-; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x double> %shuffle
@@ -238,15 +192,9 @@ define <8 x double> @shuffle_v8f64_08194c5d(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_2a3b6e7f(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_2a3b6e7f:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm3
-; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,1,3,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x double> %shuffle
@@ -255,13 +203,9 @@ define <8 x double> @shuffle_v8f64_2a3b6e7f(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08192a3b(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_08192a3b:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm1[0,2,2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[2,1,3,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
ret <8 x double> %shuffle
@@ -270,11 +214,9 @@ define <8 x double> @shuffle_v8f64_08192a3b(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08991abb(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_08991abb:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm1[0,0,1,1]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,3,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 9, i32 1, i32 10, i32 11, i32 11>
ret <8 x double> %shuffle
@@ -283,12 +225,9 @@ define <8 x double> @shuffle_v8f64_08991abb(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_091b2d3f(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_091b2d3f:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[2,1,3,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
ret <8 x double> %shuffle
@@ -297,11 +236,9 @@ define <8 x double> @shuffle_v8f64_091b2d3f(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_09ab1def(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_09ab1def:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermilpd {{.*#+}} ymm3 = ymm0[1,0,2,2]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
ret <8 x double> %shuffle
@@ -310,10 +247,7 @@ define <8 x double> @shuffle_v8f64_09ab1def(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00014445(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00014445:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,0,0,1]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $64, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
ret <8 x double> %shuffle
@@ -322,10 +256,7 @@ define <8 x double> @shuffle_v8f64_00014445(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00204464(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00204464:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,0,2,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $32, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
ret <8 x double> %shuffle
@@ -334,10 +265,7 @@ define <8 x double> @shuffle_v8f64_00204464(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_03004744(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_03004744:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,3,0,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $12, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -346,10 +274,7 @@ define <8 x double> @shuffle_v8f64_03004744(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10005444(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_10005444:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[1,0,0,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $1, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -358,10 +283,7 @@ define <8 x double> @shuffle_v8f64_10005444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_22006644(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_22006644:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,2,0,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $10, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -370,10 +292,7 @@ define <8 x double> @shuffle_v8f64_22006644(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_33307774(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_33307774:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[3,3,3,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $63, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
ret <8 x double> %shuffle
@@ -382,10 +301,7 @@ define <8 x double> @shuffle_v8f64_33307774(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_32107654(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_32107654:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[3,2,1,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermpd $27, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
ret <8 x double> %shuffle
@@ -394,10 +310,7 @@ define <8 x double> @shuffle_v8f64_32107654(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00234467(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00234467:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[0,0,2,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $136, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -406,10 +319,7 @@ define <8 x double> @shuffle_v8f64_00234467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00224466(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00224466:
; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm0[0,0,2,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $0, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
ret <8 x double> %shuffle
@@ -418,10 +328,7 @@ define <8 x double> @shuffle_v8f64_00224466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10325476(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_10325476:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,0,3,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $85, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
ret <8 x double> %shuffle
@@ -430,10 +337,7 @@ define <8 x double> @shuffle_v8f64_10325476(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_11335577(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_11335577:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,1,3,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $255, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
ret <8 x double> %shuffle
@@ -442,10 +346,7 @@ define <8 x double> @shuffle_v8f64_11335577(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10235467(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_10235467:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,0,2,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $153, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -454,10 +355,7 @@ define <8 x double> @shuffle_v8f64_10235467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10225466(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_10225466:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,0,2,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $17, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
ret <8 x double> %shuffle
@@ -466,10 +364,8 @@ define <8 x double> @shuffle_v8f64_10225466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00015444(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00015444:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,0,0,1]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 5, i32 4, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -478,10 +374,8 @@ define <8 x double> @shuffle_v8f64_00015444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00204644(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00204644:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,0,2,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 6, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -490,10 +384,8 @@ define <8 x double> @shuffle_v8f64_00204644(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_03004474(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_03004474:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[0,3,0,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,3,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 4, i32 7, i32 4>
ret <8 x double> %shuffle
@@ -502,10 +394,8 @@ define <8 x double> @shuffle_v8f64_03004474(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10004444(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_10004444:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[1,0,0,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -514,10 +404,8 @@ define <8 x double> @shuffle_v8f64_10004444(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_22006446(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_22006446:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,2,0,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,0,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 4, i32 4, i32 6>
ret <8 x double> %shuffle
@@ -526,10 +414,8 @@ define <8 x double> @shuffle_v8f64_22006446(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_33307474(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_33307474:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[3,3,3,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,3,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 4, i32 7, i32 4>
ret <8 x double> %shuffle
@@ -538,9 +424,8 @@ define <8 x double> @shuffle_v8f64_33307474(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_32104567(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_32104567:
; ALL: # BB#0:
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[3,2,1,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -549,10 +434,8 @@ define <8 x double> @shuffle_v8f64_32104567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00236744(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00236744:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[0,0,2,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 6, i32 7, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -561,10 +444,8 @@ define <8 x double> @shuffle_v8f64_00236744(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00226644(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00226644:
; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm0[0,0,2,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 6, i32 6, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -573,9 +454,7 @@ define <8 x double> @shuffle_v8f64_00226644(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_10324567(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_10324567:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,0,3,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $165, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -584,9 +463,7 @@ define <8 x double> @shuffle_v8f64_10324567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_11334567(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_11334567:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,1,3,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $175, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -595,9 +472,7 @@ define <8 x double> @shuffle_v8f64_11334567(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01235467(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_01235467:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $154, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -606,9 +481,7 @@ define <8 x double> @shuffle_v8f64_01235467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_01235466(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_01235466:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $26, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 6>
ret <8 x double> %shuffle
@@ -617,10 +490,8 @@ define <8 x double> @shuffle_v8f64_01235466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_002u6u44(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_002u6u44:
; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm0[0,0,2,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,0,0]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 undef, i32 6, i32 undef, i32 4, i32 4>
ret <8 x double> %shuffle
@@ -629,10 +500,8 @@ define <8 x double> @shuffle_v8f64_002u6u44(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00uu66uu(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00uu66uu:
; ALL: # BB#0:
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm1
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 undef, i32 undef, i32 6, i32 6, i32 undef, i32 undef>
ret <8 x double> %shuffle
@@ -641,9 +510,7 @@ define <8 x double> @shuffle_v8f64_00uu66uu(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_103245uu(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_103245uu:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,0,3,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $37, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 undef, i32 undef>
ret <8 x double> %shuffle
@@ -652,9 +519,7 @@ define <8 x double> @shuffle_v8f64_103245uu(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_1133uu67(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_1133uu67:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[1,1,3,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $143, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 undef, i32 6, i32 7>
ret <8 x double> %shuffle
@@ -663,9 +528,7 @@ define <8 x double> @shuffle_v8f64_1133uu67(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_0uu354uu(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_0uu354uu:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $24, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 undef, i32 undef, i32 3, i32 5, i32 4, i32 undef, i32 undef>
ret <8 x double> %shuffle
@@ -674,9 +537,7 @@ define <8 x double> @shuffle_v8f64_0uu354uu(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_uuu3uu66(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_uuu3uu66:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
-; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $8, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
ret <8 x double> %shuffle
@@ -685,16 +546,9 @@ define <8 x double> @shuffle_v8f64_uuu3uu66(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_c348cda0(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_c348cda0:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm2[0,1]
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vbroadcastsd %xmm1, %ymm4
-; ALL-NEXT: vblendpd {{.*#+}} ymm4 = ymm3[0,1,2],ymm4[3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3]
-; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 12, i32 3, i32 4, i32 8, i32 12, i32 13, i32 10, i32 0>
ret <8 x double> %shuffle
@@ -703,17 +557,9 @@ define <8 x double> @shuffle_v8f64_c348cda0(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_f511235a(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_f511235a:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vblendpd {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,3,1,3]
-; ALL-NEXT: vmovddup {{.*#+}} ymm4 = ymm1[0,0,2,2]
-; ALL-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,1]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,1,2,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm3, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2pd %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 15, i32 5, i32 1, i32 1, i32 2, i32 3, i32 5, i32 10>
ret <8 x double> %shuffle
@@ -731,9 +577,8 @@ define <8 x i64> @shuffle_v8i64_00000000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00000010(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00000010:
; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 0>
ret <8 x i64> %shuffle
@@ -742,9 +587,8 @@ define <8 x i64> @shuffle_v8i64_00000010(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00000200(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00000200:
; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 0>
ret <8 x i64> %shuffle
@@ -753,9 +597,8 @@ define <8 x i64> @shuffle_v8i64_00000200(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00003000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00003000:
; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,0,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0>
ret <8 x i64> %shuffle
@@ -764,11 +607,8 @@ define <8 x i64> @shuffle_v8i64_00003000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00040000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00040000:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpbroadcastq %xmm1, %ymm1
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0>
ret <8 x i64> %shuffle
@@ -777,11 +617,8 @@ define <8 x i64> @shuffle_v8i64_00040000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00500000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00500000:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <8 x i64> %shuffle
@@ -790,11 +627,8 @@ define <8 x i64> @shuffle_v8i64_00500000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_06000000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_06000000:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,0,0]
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <8 x i64> %shuffle
@@ -803,11 +637,11 @@ define <8 x i64> @shuffle_v8i64_06000000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_70000000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_70000000:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,0,0,0]
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; ALL-NEXT: movl $7, %eax
+; ALL-NEXT: vpinsrq $0, %rax, %xmm1, %xmm2
+; ALL-NEXT: vinserti32x4 $0, %xmm2, %zmm1, %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <8 x i64> %shuffle
@@ -816,10 +650,7 @@ define <8 x i64> @shuffle_v8i64_70000000(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01014545(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_01014545:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
-; ALL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermq $68, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
ret <8 x i64> %shuffle
@@ -828,9 +659,8 @@ define <8 x i64> @shuffle_v8i64_01014545(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00112233(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00112233:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,1,1]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
ret <8 x i64> %shuffle
@@ -839,9 +669,8 @@ define <8 x i64> @shuffle_v8i64_00112233(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00001111(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00001111:
; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1>
ret <8 x i64> %shuffle
@@ -850,11 +679,7 @@ define <8 x i64> @shuffle_v8i64_00001111(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_81a3c5e7(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_81a3c5e7:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vshufpd $170, %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
ret <8 x i64> %shuffle
@@ -863,10 +688,9 @@ define <8 x i64> @shuffle_v8i64_81a3c5e7(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08080808(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_08080808:
; ALL: # BB#0:
-; ALL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; ALL-NEXT: vpbroadcastq %xmm1, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
ret <8 x i64> %shuffle
@@ -875,15 +699,9 @@ define <8 x i64> @shuffle_v8i64_08080808(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08084c4c(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_08084c4c:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm2, %ymm2
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpbroadcastq %xmm3, %ymm3
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
-; ALL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; ALL-NEXT: vpbroadcastq %xmm1, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 4, i32 12, i32 4, i32 12>
ret <8 x i64> %shuffle
@@ -892,13 +710,9 @@ define <8 x i64> @shuffle_v8i64_08084c4c(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_8823cc67(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_8823cc67:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpbroadcastq %xmm3, %ymm3
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; ALL-NEXT: vpbroadcastq %xmm1, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 8, i32 8, i32 2, i32 3, i32 12, i32 12, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -907,13 +721,9 @@ define <8 x i64> @shuffle_v8i64_8823cc67(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_9832dc76(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_9832dc76:
; ALL: # BB#0:
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; ALL-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
ret <8 x i64> %shuffle
@@ -922,13 +732,9 @@ define <8 x i64> @shuffle_v8i64_9832dc76(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_9810dc54(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_9810dc54:
; ALL: # BB#0:
-; ALL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm2
-; ALL-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
ret <8 x i64> %shuffle
@@ -937,15 +743,9 @@ define <8 x i64> @shuffle_v8i64_9810dc54(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08194c5d(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_08194c5d:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,1,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x i64> %shuffle
@@ -954,15 +754,9 @@ define <8 x i64> @shuffle_v8i64_08194c5d(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_2a3b6e7f(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_2a3b6e7f:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,3,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x i64> %shuffle
@@ -971,13 +765,9 @@ define <8 x i64> @shuffle_v8i64_2a3b6e7f(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08192a3b(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_08192a3b:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,2,2,3]
-; ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,1,3,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
ret <8 x i64> %shuffle
@@ -986,11 +776,9 @@ define <8 x i64> @shuffle_v8i64_08192a3b(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08991abb(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_08991abb:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,0,1,1]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3,4,5,6,7]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,3]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 9, i32 1, i32 10, i32 11, i32 11>
ret <8 x i64> %shuffle
@@ -999,12 +787,9 @@ define <8 x i64> @shuffle_v8i64_08991abb(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_091b2d3f(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_091b2d3f:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,1,3,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm1, %zmm0, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
ret <8 x i64> %shuffle
@@ -1013,11 +798,9 @@ define <8 x i64> @shuffle_v8i64_091b2d3f(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_09ab1def(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_09ab1def:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
ret <8 x i64> %shuffle
@@ -1026,10 +809,7 @@ define <8 x i64> @shuffle_v8i64_09ab1def(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00014445(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00014445:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,0,1]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $64, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 4, i32 4, i32 4, i32 5>
ret <8 x i64> %shuffle
@@ -1038,10 +818,7 @@ define <8 x i64> @shuffle_v8i64_00014445(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00204464(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00204464:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,2,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $32, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 4, i32 6, i32 4>
ret <8 x i64> %shuffle
@@ -1050,10 +827,7 @@ define <8 x i64> @shuffle_v8i64_00204464(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_03004744(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_03004744:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,3,0,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $12, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 7, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1062,10 +836,7 @@ define <8 x i64> @shuffle_v8i64_03004744(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10005444(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_10005444:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[1,0,0,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $1, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 5, i32 4, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1074,10 +845,7 @@ define <8 x i64> @shuffle_v8i64_10005444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_22006644(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_22006644:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,2,0,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $10, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 6, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1086,10 +854,7 @@ define <8 x i64> @shuffle_v8i64_22006644(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_33307774(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_33307774:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[3,3,3,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,3,3,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $63, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 7, i32 7, i32 4>
ret <8 x i64> %shuffle
@@ -1098,10 +863,7 @@ define <8 x i64> @shuffle_v8i64_33307774(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_32107654(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_32107654:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[3,2,1,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,1,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermq $27, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
ret <8 x i64> %shuffle
@@ -1110,10 +872,7 @@ define <8 x i64> @shuffle_v8i64_32107654(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00234467(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00234467:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,2,3]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $136, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 4, i32 4, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1122,10 +881,7 @@ define <8 x i64> @shuffle_v8i64_00234467(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00224466(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00224466:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $0, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
ret <8 x i64> %shuffle
@@ -1134,10 +890,7 @@ define <8 x i64> @shuffle_v8i64_00224466(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10325476(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_10325476:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $85, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
ret <8 x i64> %shuffle
@@ -1146,10 +899,7 @@ define <8 x i64> @shuffle_v8i64_10325476(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_11335577(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_11335577:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[2,3,2,3,6,7,6,7]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $255, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
ret <8 x i64> %shuffle
@@ -1158,10 +908,7 @@ define <8 x i64> @shuffle_v8i64_11335577(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10235467(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_10235467:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[1,0,2,3]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,3]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $153, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1170,10 +917,7 @@ define <8 x i64> @shuffle_v8i64_10235467(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10225466(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_10225466:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[1,0,2,2]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,2]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $17, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 2, i32 2, i32 5, i32 4, i32 6, i32 6>
ret <8 x i64> %shuffle
@@ -1182,10 +926,8 @@ define <8 x i64> @shuffle_v8i64_10225466(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00015444(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00015444:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,0,1]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 5, i32 4, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1194,10 +936,8 @@ define <8 x i64> @shuffle_v8i64_00015444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00204644(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00204644:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,2,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 0, i32 4, i32 6, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1206,10 +946,8 @@ define <8 x i64> @shuffle_v8i64_00204644(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_03004474(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_03004474:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,3,0,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,3,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 3, i32 0, i32 0, i32 4, i32 4, i32 7, i32 4>
ret <8 x i64> %shuffle
@@ -1218,10 +956,8 @@ define <8 x i64> @shuffle_v8i64_03004474(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10004444(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_10004444:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[1,0,0,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1230,10 +966,8 @@ define <8 x i64> @shuffle_v8i64_10004444(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_22006446(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_22006446:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,2,0,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,0,0,2]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 2, i32 0, i32 0, i32 6, i32 4, i32 4, i32 6>
ret <8 x i64> %shuffle
@@ -1242,10 +976,8 @@ define <8 x i64> @shuffle_v8i64_22006446(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_33307474(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_33307474:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[3,3,3,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,0,3,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 0, i32 7, i32 4, i32 7, i32 4>
ret <8 x i64> %shuffle
@@ -1254,9 +986,8 @@ define <8 x i64> @shuffle_v8i64_33307474(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_32104567(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_32104567:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[3,2,1,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1265,10 +996,8 @@ define <8 x i64> @shuffle_v8i64_32104567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00236744(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00236744:
; ALL: # BB#0:
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,0,2,3]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 3, i32 6, i32 7, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1277,10 +1006,8 @@ define <8 x i64> @shuffle_v8i64_00236744(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00226644(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00226644:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 6, i32 6, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1289,9 +1016,7 @@ define <8 x i64> @shuffle_v8i64_00226644(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_10324567(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_10324567:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $165, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1300,9 +1025,7 @@ define <8 x i64> @shuffle_v8i64_10324567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_11334567(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_11334567:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[2,3,2,3,6,7,6,7]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $175, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1311,9 +1034,7 @@ define <8 x i64> @shuffle_v8i64_11334567(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01235467(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_01235467:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,0,2,3]
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $154, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1322,9 +1043,7 @@ define <8 x i64> @shuffle_v8i64_01235467(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_01235466(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_01235466:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,0,2,2]
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $26, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 6, i32 6>
ret <8 x i64> %shuffle
@@ -1333,10 +1052,8 @@ define <8 x i64> @shuffle_v8i64_01235466(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_002u6u44(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_002u6u44:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,0,0]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 undef, i32 6, i32 undef, i32 4, i32 4>
ret <8 x i64> %shuffle
@@ -1345,10 +1062,8 @@ define <8 x i64> @shuffle_v8i64_002u6u44(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_00uu66uu(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00uu66uu:
; ALL: # BB#0:
-; ALL-NEXT: vpbroadcastq %xmm0, %ymm1
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm1
+; ALL-NEXT: vpermq %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 undef, i32 undef, i32 6, i32 6, i32 undef, i32 undef>
ret <8 x i64> %shuffle
@@ -1357,9 +1072,7 @@ define <8 x i64> @shuffle_v8i64_00uu66uu(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_103245uu(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_103245uu:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $37, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 undef, i32 undef>
ret <8 x i64> %shuffle
@@ -1368,9 +1081,7 @@ define <8 x i64> @shuffle_v8i64_103245uu(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_1133uu67(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_1133uu67:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[2,3,2,3,6,7,6,7]
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT: vpermilpd $143, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 undef, i32 6, i32 7>
ret <8 x i64> %shuffle
@@ -1379,9 +1090,7 @@ define <8 x i64> @shuffle_v8i64_1133uu67(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_0uu354uu(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_0uu354uu:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $24, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 undef, i32 undef, i32 3, i32 5, i32 4, i32 undef, i32 undef>
ret <8 x i64> %shuffle
@@ -1390,9 +1099,7 @@ define <8 x i64> @shuffle_v8i64_0uu354uu(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_uuu3uu66(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_uuu3uu66:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd $8, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
ret <8 x i64> %shuffle
@@ -1401,15 +1108,9 @@ define <8 x i64> @shuffle_v8i64_uuu3uu66(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_6caa87e5(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_6caa87e5:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; ALL-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7]
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; ALL-NEXT: vmovdqa64 {{.*}}(%rip), %zmm2
+; ALL-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
+; ALL-NEXT: vmovaps %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 6, i32 12, i32 10, i32 10, i32 8, i32 7, i32 14, i32 5>
ret <8 x i64> %shuffle
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
new file mode 100644
index 0000000000000..422fe052d38b9
--- /dev/null
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -0,0 +1,1788 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
+define <2 x i64> @testv2i64(<2 x i64> %in) {
+; SSE2-LABEL: testv2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsfq %rax, %rax
+; SSE2-NEXT: movl $64, %ecx
+; SSE2-NEXT: cmoveq %rcx, %rax
+; SSE2-NEXT: movd %rax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsfq %rax, %rax
+; SSE2-NEXT: cmoveq %rcx, %rax
+; SSE2-NEXT: movd %rax, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv2i64:
+; SSE3: # BB#0:
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsfq %rax, %rax
+; SSE3-NEXT: movl $64, %ecx
+; SSE3-NEXT: cmoveq %rcx, %rax
+; SSE3-NEXT: movd %rax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsfq %rax, %rax
+; SSE3-NEXT: cmoveq %rcx, %rax
+; SSE3-NEXT: movd %rax, %xmm0
+; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv2i64:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsfq %rax, %rax
+; SSSE3-NEXT: movl $64, %ecx
+; SSSE3-NEXT: cmoveq %rcx, %rax
+; SSSE3-NEXT: movd %rax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsfq %rax, %rax
+; SSSE3-NEXT: cmoveq %rcx, %rax
+; SSSE3-NEXT: movd %rax, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rax
+; SSE41-NEXT: bsfq %rax, %rax
+; SSE41-NEXT: movl $64, %ecx
+; SSE41-NEXT: cmoveq %rcx, %rax
+; SSE41-NEXT: movd %rax, %xmm1
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: bsfq %rax, %rax
+; SSE41-NEXT: cmoveq %rcx, %rax
+; SSE41-NEXT: movd %rax, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: bsfq %rax, %rax
+; AVX-NEXT: movl $64, %ecx
+; AVX-NEXT: cmoveq %rcx, %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: bsfq %rax, %rax
+; AVX-NEXT: cmoveq %rcx, %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %in, i1 0)
+ ret <2 x i64> %out
+}
+
+define <2 x i64> @testv2i64u(<2 x i64> %in) {
+; SSE2-LABEL: testv2i64u:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsfq %rax, %rax
+; SSE2-NEXT: movd %rax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: bsfq %rax, %rax
+; SSE2-NEXT: movd %rax, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv2i64u:
+; SSE3: # BB#0:
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsfq %rax, %rax
+; SSE3-NEXT: movd %rax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %rax
+; SSE3-NEXT: bsfq %rax, %rax
+; SSE3-NEXT: movd %rax, %xmm0
+; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv2i64u:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsfq %rax, %rax
+; SSSE3-NEXT: movd %rax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: bsfq %rax, %rax
+; SSSE3-NEXT: movd %rax, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv2i64u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rax
+; SSE41-NEXT: bsfq %rax, %rax
+; SSE41-NEXT: movd %rax, %xmm1
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: bsfq %rax, %rax
+; SSE41-NEXT: movd %rax, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv2i64u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: bsfq %rax, %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: bsfq %rax, %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %in, i1 -1)
+ ret <2 x i64> %out
+}
+
+define <4 x i32> @testv4i32(<4 x i32> %in) {
+; SSE2-LABEL: testv4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movl $32, %ecx
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: cmovel %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv4i32:
+; SSE3: # BB#0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm1, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movl $32, %ecx
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE3-NEXT: movd %xmm2, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: cmovel %ecx, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv4i32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movl $32, %ecx
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: cmovel %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrd $1, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: movl $32, %ecx
+; SSE41-NEXT: cmovel %ecx, %eax
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: movd %edx, %xmm1
+; SSE41-NEXT: pinsrd $1, %eax, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: cmovel %ecx, %eax
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: cmovel %ecx, %eax
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: movl $32, %ecx
+; AVX-NEXT: cmovel %ecx, %eax
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $2, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: cmovel %ecx, %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $3, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: cmovel %ecx, %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %in, i1 0)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @testv4i32u(<4 x i32> %in) {
+; SSE2-LABEL: testv4i32u:
+; SSE2: # BB#0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv4i32u:
+; SSE3: # BB#0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE3-NEXT: movd %xmm1, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE3-NEXT: movd %xmm2, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv4i32u:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv4i32u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrd $1, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: bsfl %ecx, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrd $1, %eax, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv4i32u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: bsfl %ecx, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $2, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrd $3, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %in, i1 -1)
+ ret <4 x i32> %out
+}
+
+define <8 x i16> @testv8i16(<8 x i16> %in) {
+; SSE2-LABEL: testv8i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %cx
+; SSE2-NEXT: movw $16, %ax
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: pextrw $3, %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: pextrw $1, %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pextrw $2, %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: bsfw %cx, %cx
+; SSE2-NEXT: cmovew %ax, %cx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv8i16:
+; SSE3: # BB#0:
+; SSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %cx
+; SSE3-NEXT: movw $16, %ax
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: pextrw $3, %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT: pextrw $5, %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: pextrw $1, %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE3-NEXT: pextrw $6, %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm3
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: bsfw %cx, %cx
+; SSE3-NEXT: cmovew %ax, %cx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv8i16:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %cx
+; SSSE3-NEXT: movw $16, %ax
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: pextrw $3, %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: pextrw $5, %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: pextrw $1, %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: pextrw $6, %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: bsfw %cx, %cx
+; SSSE3-NEXT: cmovew %ax, %cx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv8i16:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %cx
+; SSE41-NEXT: movw $16, %ax
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: movd %xmm0, %edx
+; SSE41-NEXT: bsfw %dx, %dx
+; SSE41-NEXT: cmovew %ax, %dx
+; SSE41-NEXT: movd %edx, %xmm1
+; SSE41-NEXT: pinsrw $1, %ecx, %xmm1
+; SSE41-NEXT: pextrw $2, %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: pinsrw $2, %ecx, %xmm1
+; SSE41-NEXT: pextrw $3, %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: pinsrw $3, %ecx, %xmm1
+; SSE41-NEXT: pextrw $4, %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: pinsrw $4, %ecx, %xmm1
+; SSE41-NEXT: pextrw $5, %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: pinsrw $5, %ecx, %xmm1
+; SSE41-NEXT: pextrw $6, %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: pinsrw $6, %ecx, %xmm1
+; SSE41-NEXT: pextrw $7, %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: cmovew %ax, %cx
+; SSE41-NEXT: pinsrw $7, %ecx, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %cx
+; AVX-NEXT: movw $16, %ax
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: bsfw %dx, %dx
+; AVX-NEXT: cmovew %ax, %dx
+; AVX-NEXT: vmovd %edx, %xmm1
+; AVX-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $7, %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: cmovew %ax, %cx
+; AVX-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %in, i1 0)
+ ret <8 x i16> %out
+}
+
+define <8 x i16> @testv8i16u(<8 x i16> %in) {
+; SSE2-LABEL: testv8i16u:
+; SSE2: # BB#0:
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: bsfw %ax, %ax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv8i16u:
+; SSE3: # BB#0:
+; SSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: bsfw %ax, %ax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv8i16u:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: movd %xmm0, %eax
+; SSSE3-NEXT: bsfw %ax, %ax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv8i16u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrw $1, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: bsfw %cx, %cx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrw $1, %eax, %xmm1
+; SSE41-NEXT: pextrw $2, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $2, %eax, %xmm1
+; SSE41-NEXT: pextrw $3, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $3, %eax, %xmm1
+; SSE41-NEXT: pextrw $4, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $4, %eax, %xmm1
+; SSE41-NEXT: pextrw $5, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $5, %eax, %xmm1
+; SSE41-NEXT: pextrw $6, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $6, %eax, %xmm1
+; SSE41-NEXT: pextrw $7, %xmm0, %eax
+; SSE41-NEXT: bsfw %ax, %ax
+; SSE41-NEXT: pinsrw $7, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv8i16u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: bsfw %cx, %cx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $2, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $3, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $4, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $5, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $6, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrw $7, %xmm0, %eax
+; AVX-NEXT: bsfw %ax, %ax
+; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %in, i1 -1)
+ ret <8 x i16> %out
+}
+
+define <16 x i8> @testv16i8(<16 x i8> %in) {
+; SSE2-LABEL: testv16i8:
+; SSE2: # BB#0:
+; SSE2: pushq %rbp
+; SSE2: pushq %r14
+; SSE2: pushq %rbx
+; SSE2: movaps %xmm0, -16(%rsp)
+; SSE2-NEXT: movzbl -1(%rsp), %eax
+; SSE2-NEXT: bsfl %eax, %edx
+; SSE2-NEXT: movl $32, %eax
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: movl $8, %ecx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: movzbl -2(%rsp), %r14d
+; SSE2-NEXT: movzbl -3(%rsp), %ebx
+; SSE2-NEXT: movzbl -4(%rsp), %r9d
+; SSE2-NEXT: movzbl -5(%rsp), %edi
+; SSE2-NEXT: movzbl -6(%rsp), %r11d
+; SSE2-NEXT: movzbl -7(%rsp), %edx
+; SSE2-NEXT: movzbl -8(%rsp), %r8d
+; SSE2-NEXT: movzbl -9(%rsp), %esi
+; SSE2-NEXT: bsfl %esi, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: bsfl %edi, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm2
+; SSE2-NEXT: movzbl -10(%rsp), %edi
+; SSE2-NEXT: movzbl -11(%rsp), %esi
+; SSE2-NEXT: movzbl -12(%rsp), %r10d
+; SSE2-NEXT: movzbl -13(%rsp), %ebp
+; SSE2-NEXT: bsfl %ebp, %ebp
+; SSE2-NEXT: cmovel %eax, %ebp
+; SSE2-NEXT: cmpl $32, %ebp
+; SSE2-NEXT: cmovel %ecx, %ebp
+; SSE2-NEXT: movd %ebp, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: bsfl %ebx, %ebx
+; SSE2-NEXT: cmovel %eax, %ebx
+; SSE2-NEXT: cmpl $32, %ebx
+; SSE2-NEXT: cmovel %ecx, %ebx
+; SSE2-NEXT: movd %ebx, %xmm1
+; SSE2-NEXT: bsfl %esi, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: bsfl %edx, %edx
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: movzbl -14(%rsp), %edx
+; SSE2-NEXT: movzbl -15(%rsp), %esi
+; SSE2-NEXT: bsfl %esi, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: bsfl %r14d, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm0
+; SSE2-NEXT: bsfl %edi, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: bsfl %r11d, %esi
+; SSE2-NEXT: cmovel %eax, %esi
+; SSE2-NEXT: cmpl $32, %esi
+; SSE2-NEXT: cmovel %ecx, %esi
+; SSE2-NEXT: movd %esi, %xmm0
+; SSE2-NEXT: bsfl %edx, %edx
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: bsfl %r9d, %edx
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: bsfl %r10d, %edx
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: bsfl %r8d, %edx
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: movzbl -16(%rsp), %edx
+; SSE2-NEXT: bsfl %edx, %edx
+; SSE2-NEXT: cmovel %eax, %edx
+; SSE2-NEXT: cmpl $32, %edx
+; SSE2-NEXT: cmovel %ecx, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv16i8:
+; SSE3: # BB#0:
+; SSE3: pushq %rbp
+; SSE3: pushq %r14
+; SSE3: pushq %rbx
+; SSE3: movaps %xmm0, -16(%rsp)
+; SSE3-NEXT: movzbl -1(%rsp), %eax
+; SSE3-NEXT: bsfl %eax, %edx
+; SSE3-NEXT: movl $32, %eax
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: movl $8, %ecx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm0
+; SSE3-NEXT: movzbl -2(%rsp), %r14d
+; SSE3-NEXT: movzbl -3(%rsp), %ebx
+; SSE3-NEXT: movzbl -4(%rsp), %r9d
+; SSE3-NEXT: movzbl -5(%rsp), %edi
+; SSE3-NEXT: movzbl -6(%rsp), %r11d
+; SSE3-NEXT: movzbl -7(%rsp), %edx
+; SSE3-NEXT: movzbl -8(%rsp), %r8d
+; SSE3-NEXT: movzbl -9(%rsp), %esi
+; SSE3-NEXT: bsfl %esi, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: bsfl %edi, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm2
+; SSE3-NEXT: movzbl -10(%rsp), %edi
+; SSE3-NEXT: movzbl -11(%rsp), %esi
+; SSE3-NEXT: movzbl -12(%rsp), %r10d
+; SSE3-NEXT: movzbl -13(%rsp), %ebp
+; SSE3-NEXT: bsfl %ebp, %ebp
+; SSE3-NEXT: cmovel %eax, %ebp
+; SSE3-NEXT: cmpl $32, %ebp
+; SSE3-NEXT: cmovel %ecx, %ebp
+; SSE3-NEXT: movd %ebp, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: bsfl %ebx, %ebx
+; SSE3-NEXT: cmovel %eax, %ebx
+; SSE3-NEXT: cmpl $32, %ebx
+; SSE3-NEXT: cmovel %ecx, %ebx
+; SSE3-NEXT: movd %ebx, %xmm1
+; SSE3-NEXT: bsfl %esi, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE3-NEXT: bsfl %edx, %edx
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm3
+; SSE3-NEXT: movzbl -14(%rsp), %edx
+; SSE3-NEXT: movzbl -15(%rsp), %esi
+; SSE3-NEXT: bsfl %esi, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: bsfl %r14d, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm0
+; SSE3-NEXT: bsfl %edi, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: bsfl %r11d, %esi
+; SSE3-NEXT: cmovel %eax, %esi
+; SSE3-NEXT: cmpl $32, %esi
+; SSE3-NEXT: cmovel %ecx, %esi
+; SSE3-NEXT: movd %esi, %xmm0
+; SSE3-NEXT: bsfl %edx, %edx
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE3-NEXT: bsfl %r9d, %edx
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm0
+; SSE3-NEXT: bsfl %r10d, %edx
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: bsfl %r8d, %edx
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm4
+; SSE3-NEXT: movzbl -16(%rsp), %edx
+; SSE3-NEXT: bsfl %edx, %edx
+; SSE3-NEXT: cmovel %eax, %edx
+; SSE3-NEXT: cmpl $32, %edx
+; SSE3-NEXT: cmovel %ecx, %edx
+; SSE3-NEXT: movd %edx, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: popq %r14
+; SSE3-NEXT: popq %rbp
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv16i8:
+; SSSE3: # BB#0:
+; SSSE3: pushq %rbp
+; SSSE3: pushq %r14
+; SSSE3: pushq %rbx
+; SSSE3: movaps %xmm0, -16(%rsp)
+; SSSE3-NEXT: movzbl -1(%rsp), %eax
+; SSSE3-NEXT: bsfl %eax, %edx
+; SSSE3-NEXT: movl $32, %eax
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: movl $8, %ecx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: movzbl -2(%rsp), %r14d
+; SSSE3-NEXT: movzbl -3(%rsp), %ebx
+; SSSE3-NEXT: movzbl -4(%rsp), %r9d
+; SSSE3-NEXT: movzbl -5(%rsp), %edi
+; SSSE3-NEXT: movzbl -6(%rsp), %r11d
+; SSSE3-NEXT: movzbl -7(%rsp), %edx
+; SSSE3-NEXT: movzbl -8(%rsp), %r8d
+; SSSE3-NEXT: movzbl -9(%rsp), %esi
+; SSSE3-NEXT: bsfl %esi, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: bsfl %edi, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm2
+; SSSE3-NEXT: movzbl -10(%rsp), %edi
+; SSSE3-NEXT: movzbl -11(%rsp), %esi
+; SSSE3-NEXT: movzbl -12(%rsp), %r10d
+; SSSE3-NEXT: movzbl -13(%rsp), %ebp
+; SSSE3-NEXT: bsfl %ebp, %ebp
+; SSSE3-NEXT: cmovel %eax, %ebp
+; SSSE3-NEXT: cmpl $32, %ebp
+; SSSE3-NEXT: cmovel %ecx, %ebp
+; SSSE3-NEXT: movd %ebp, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: bsfl %ebx, %ebx
+; SSSE3-NEXT: cmovel %eax, %ebx
+; SSSE3-NEXT: cmpl $32, %ebx
+; SSSE3-NEXT: cmovel %ecx, %ebx
+; SSSE3-NEXT: movd %ebx, %xmm1
+; SSSE3-NEXT: bsfl %esi, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSSE3-NEXT: bsfl %edx, %edx
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm3
+; SSSE3-NEXT: movzbl -14(%rsp), %edx
+; SSSE3-NEXT: movzbl -15(%rsp), %esi
+; SSSE3-NEXT: bsfl %esi, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: bsfl %r14d, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm0
+; SSSE3-NEXT: bsfl %edi, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: bsfl %r11d, %esi
+; SSSE3-NEXT: cmovel %eax, %esi
+; SSSE3-NEXT: cmpl $32, %esi
+; SSSE3-NEXT: cmovel %ecx, %esi
+; SSSE3-NEXT: movd %esi, %xmm0
+; SSSE3-NEXT: bsfl %edx, %edx
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSSE3-NEXT: bsfl %r9d, %edx
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: bsfl %r10d, %edx
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: bsfl %r8d, %edx
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm4
+; SSSE3-NEXT: movzbl -16(%rsp), %edx
+; SSSE3-NEXT: bsfl %edx, %edx
+; SSSE3-NEXT: cmovel %eax, %edx
+; SSSE3-NEXT: cmpl $32, %edx
+; SSSE3-NEXT: cmovel %ecx, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: popq %rbx
+; SSSE3-NEXT: popq %r14
+; SSSE3-NEXT: popq %rbp
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %edx
+; SSE41-NEXT: movl $32, %eax
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: movl $8, %ecx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pextrb $0, %xmm0, %esi
+; SSE41-NEXT: bsfl %esi, %esi
+; SSE41-NEXT: cmovel %eax, %esi
+; SSE41-NEXT: cmpl $32, %esi
+; SSE41-NEXT: cmovel %ecx, %esi
+; SSE41-NEXT: movd %esi, %xmm1
+; SSE41-NEXT: pinsrb $1, %edx, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $2, %edx, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $3, %edx, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $4, %edx, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $5, %edx, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $6, %edx, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $7, %edx, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $8, %edx, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $9, %edx, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $10, %edx, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $11, %edx, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $12, %edx, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $13, %edx, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $14, %edx, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %edx
+; SSE41-NEXT: bsfl %edx, %edx
+; SSE41-NEXT: cmovel %eax, %edx
+; SSE41-NEXT: cmpl $32, %edx
+; SSE41-NEXT: cmovel %ecx, %edx
+; SSE41-NEXT: pinsrb $15, %edx, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %edx
+; AVX-NEXT: movl $32, %eax
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: movl $8, %ecx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpextrb $0, %xmm0, %esi
+; AVX-NEXT: bsfl %esi, %esi
+; AVX-NEXT: cmovel %eax, %esi
+; AVX-NEXT: cmpl $32, %esi
+; AVX-NEXT: cmovel %ecx, %esi
+; AVX-NEXT: vmovd %esi, %xmm1
+; AVX-NEXT: vpinsrb $1, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $2, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $3, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $4, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $5, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $7, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $8, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $9, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $10, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $11, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $12, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $13, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $14, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %edx
+; AVX-NEXT: bsfl %edx, %edx
+; AVX-NEXT: cmovel %eax, %edx
+; AVX-NEXT: cmpl $32, %edx
+; AVX-NEXT: cmovel %ecx, %edx
+; AVX-NEXT: vpinsrb $15, %edx, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 0)
+ ret <16 x i8> %out
+}
+
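+; The 'u' variants pass is_zero_undef = true (i1 -1) to @llvm.cttz, so the
+; per-element expansion below may use a bare BSF with no CMOV guard for a
+; zero input, unlike the testv16i8 checks above.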
+define <16 x i8> @testv16i8u(<16 x i8> %in) {
+; SSE2-LABEL: testv16i8u:
+; SSE2: # BB#0:
+; SSE2: pushq %rbx
+; SSE2: movaps %xmm0, -16(%rsp)
+; SSE2-NEXT: movzbl -1(%rsp), %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl -2(%rsp), %r11d
+; SSE2-NEXT: movzbl -3(%rsp), %eax
+; SSE2-NEXT: movzbl -4(%rsp), %r9d
+; SSE2-NEXT: movzbl -5(%rsp), %edi
+; SSE2-NEXT: movzbl -6(%rsp), %r10d
+; SSE2-NEXT: movzbl -7(%rsp), %ecx
+; SSE2-NEXT: movzbl -8(%rsp), %r8d
+; SSE2-NEXT: movzbl -9(%rsp), %edx
+; SSE2-NEXT: bsfl %edx, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: bsfl %edi, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: movzbl -10(%rsp), %edx
+; SSE2-NEXT: movzbl -11(%rsp), %esi
+; SSE2-NEXT: movzbl -12(%rsp), %edi
+; SSE2-NEXT: movzbl -13(%rsp), %ebx
+; SSE2-NEXT: bsfl %ebx, %ebx
+; SSE2-NEXT: movd %ebx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: bsfl %esi, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: bsfl %ecx, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl -14(%rsp), %eax
+; SSE2-NEXT: movzbl -15(%rsp), %ecx
+; SSE2-NEXT: bsfl %ecx, %ecx
+; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: bsfl %r11d, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: bsfl %edx, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: bsfl %r10d, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: bsfl %r9d, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: bsfl %edi, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: bsfl %r8d, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movzbl -16(%rsp), %eax
+; SSE2-NEXT: bsfl %eax, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: testv16i8u:
+; SSE3: # BB#0:
+; SSE3: pushq %rbx
+; SSE3: movaps %xmm0, -16(%rsp)
+; SSE3-NEXT: movzbl -1(%rsp), %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: movzbl -2(%rsp), %r11d
+; SSE3-NEXT: movzbl -3(%rsp), %eax
+; SSE3-NEXT: movzbl -4(%rsp), %r9d
+; SSE3-NEXT: movzbl -5(%rsp), %edi
+; SSE3-NEXT: movzbl -6(%rsp), %r10d
+; SSE3-NEXT: movzbl -7(%rsp), %ecx
+; SSE3-NEXT: movzbl -8(%rsp), %r8d
+; SSE3-NEXT: movzbl -9(%rsp), %edx
+; SSE3-NEXT: bsfl %edx, %edx
+; SSE3-NEXT: movd %edx, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: bsfl %edi, %edx
+; SSE3-NEXT: movd %edx, %xmm0
+; SSE3-NEXT: movzbl -10(%rsp), %edx
+; SSE3-NEXT: movzbl -11(%rsp), %esi
+; SSE3-NEXT: movzbl -12(%rsp), %edi
+; SSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSE3-NEXT: bsfl %ebx, %ebx
+; SSE3-NEXT: movd %ebx, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: bsfl %esi, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: bsfl %ecx, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: movzbl -14(%rsp), %eax
+; SSE3-NEXT: movzbl -15(%rsp), %ecx
+; SSE3-NEXT: bsfl %ecx, %ecx
+; SSE3-NEXT: movd %ecx, %xmm1
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE3-NEXT: bsfl %r11d, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: bsfl %edx, %ecx
+; SSE3-NEXT: movd %ecx, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: bsfl %r10d, %ecx
+; SSE3-NEXT: movd %ecx, %xmm0
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm3
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE3-NEXT: bsfl %r9d, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: bsfl %edi, %eax
+; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE3-NEXT: bsfl %r8d, %eax
+; SSE3-NEXT: movd %eax, %xmm4
+; SSE3-NEXT: movzbl -16(%rsp), %eax
+; SSE3-NEXT: bsfl %eax, %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: testv16i8u:
+; SSSE3: # BB#0:
+; SSSE3: pushq %rbx
+; SSSE3: movaps %xmm0, -16(%rsp)
+; SSSE3-NEXT: movzbl -1(%rsp), %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzbl -2(%rsp), %r11d
+; SSSE3-NEXT: movzbl -3(%rsp), %eax
+; SSSE3-NEXT: movzbl -4(%rsp), %r9d
+; SSSE3-NEXT: movzbl -5(%rsp), %edi
+; SSSE3-NEXT: movzbl -6(%rsp), %r10d
+; SSSE3-NEXT: movzbl -7(%rsp), %ecx
+; SSSE3-NEXT: movzbl -8(%rsp), %r8d
+; SSSE3-NEXT: movzbl -9(%rsp), %edx
+; SSSE3-NEXT: bsfl %edx, %edx
+; SSSE3-NEXT: movd %edx, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: bsfl %edi, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: movzbl -10(%rsp), %edx
+; SSSE3-NEXT: movzbl -11(%rsp), %esi
+; SSSE3-NEXT: movzbl -12(%rsp), %edi
+; SSSE3-NEXT: movzbl -13(%rsp), %ebx
+; SSSE3-NEXT: bsfl %ebx, %ebx
+; SSSE3-NEXT: movd %ebx, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: bsfl %esi, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: bsfl %ecx, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzbl -14(%rsp), %eax
+; SSSE3-NEXT: movzbl -15(%rsp), %ecx
+; SSSE3-NEXT: bsfl %ecx, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: bsfl %r11d, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: bsfl %edx, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: bsfl %r10d, %ecx
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT: bsfl %r9d, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: bsfl %edi, %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSSE3-NEXT: bsfl %r8d, %eax
+; SSSE3-NEXT: movd %eax, %xmm4
+; SSSE3-NEXT: movzbl -16(%rsp), %eax
+; SSSE3-NEXT: bsfl %eax, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: popq %rbx
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: testv16i8u:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pextrb $0, %xmm0, %ecx
+; SSE41-NEXT: bsfl %ecx, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrb $1, %eax, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: bsfl %eax, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: testv16i8u:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: bsfl %ecx, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: bsfl %eax, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 -1)
+ ret <16 x i8> %out
+}
+
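+; The foldv* tests call @llvm.cttz on constant vectors; the result should be
+; folded at compile time, so the checks expect only a constant materialization
+; and no per-element BSF sequence.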
+define <2 x i64> @foldv2i64() {
+; SSE-LABEL: foldv2i64:
+; SSE: # BB#0:
+; SSE-NEXT: movl $8, %eax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv2i64:
+; AVX: # BB#0:
+; AVX-NEXT: movl $8, %eax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
+ ret <2 x i64> %out
+}
+
+define <2 x i64> @foldv2i64u() {
+; SSE-LABEL: foldv2i64u:
+; SSE: # BB#0:
+; SSE-NEXT: movl $8, %eax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv2i64u:
+; AVX: # BB#0:
+; AVX-NEXT: movl $8, %eax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: retq
+ %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
+ ret <2 x i64> %out
+}
+
+define <4 x i32> @foldv4i32() {
+; SSE-LABEL: foldv4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
+ ret <4 x i32> %out
+}
+
+define <4 x i32> @foldv4i32u() {
+; SSE-LABEL: foldv4i32u:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv4i32u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX-NEXT: retq
+ %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
+ ret <4 x i32> %out
+}
+
+define <8 x i16> @foldv8i16() {
+; SSE-LABEL: foldv8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
+ ret <8 x i16> %out
+}
+
+define <8 x i16> @foldv8i16u() {
+; SSE-LABEL: foldv8i16u:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv8i16u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX-NEXT: retq
+ %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
+ ret <8 x i16> %out
+}
+
+define <16 x i8> @foldv16i8() {
+; SSE-LABEL: foldv16i8:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
+ ret <16 x i8> %out
+}
+
+define <16 x i8> @foldv16i8u() {
+; SSE-LABEL: foldv16i8u:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: foldv16i8u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT: retq
+ %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
+ ret <16 x i8> %out
+}
+
+declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
+declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)
+declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1)
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
new file mode 100644
index 0000000000000..8f744f79f85fa
--- /dev/null
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -0,0 +1,1195 @@
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+target triple = "x86_64-unknown-unknown"
+
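+; x86 has no vector CTTZ instruction here, so the 256-bit tests are expected
+; to extract each 128-bit half (vextractf128/vextracti128), compute BSF per
+; element through GPRs as in the 128-bit file, and reassemble the result with
+; vinsertf128/vinserti128. As before, the *u variants pass is_zero_undef =
+; true (i1 -1) and drop the zero-input CMOV guards.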
+define <4 x i64> @testv4i64(<4 x i64> %in) {
+; AVX1-LABEL: testv4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: movl $64, %ecx
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: cmoveq %rcx, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: movl $64, %ecx
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: cmoveq %rcx, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 0)
+ ret <4 x i64> %out
+}
+
+define <4 x i64> @testv4i64u(<4 x i64> %in) {
+; AVX1-LABEL: testv4i64u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv4i64u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: vmovq %rax, %xmm2
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 -1)
+ ret <4 x i64> %out
+}
+
+define <8 x i32> @testv8i32(<8 x i32> %in) {
+; AVX1-LABEL: testv8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %ecx
+; AVX1-NEXT: movl $32, %eax
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: vmovd %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: vmovd %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: cmovel %eax, %ecx
+; AVX1-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %ecx
+; AVX2-NEXT: movl $32, %eax
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: cmovel %eax, %ecx
+; AVX2-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 0)
+ ret <8 x i32> %out
+}
+
+define <8 x i32> @testv8i32u(<8 x i32> %in) {
+; AVX1-LABEL: testv8i32u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i32u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrd $1, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrd $3, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 -1)
+ ret <8 x i32> %out
+}
+
+define <16 x i16> @testv16i16(<16 x i16> %in) {
+; AVX1-LABEL: testv16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %cx
+; AVX1-NEXT: movw $16, %ax
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vmovd %xmm1, %edx
+; AVX1-NEXT: bsfw %dx, %dx
+; AVX1-NEXT: cmovew %ax, %dx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vmovd %xmm0, %edx
+; AVX1-NEXT: bsfw %dx, %dx
+; AVX1-NEXT: cmovew %ax, %dx
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: cmovew %ax, %cx
+; AVX1-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %cx
+; AVX2-NEXT: movw $16, %ax
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: bsfw %dx, %dx
+; AVX2-NEXT: cmovew %ax, %dx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: bsfw %dx, %dx
+; AVX2-NEXT: cmovew %ax, %dx
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $2, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $3, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $4, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $5, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: cmovew %ax, %cx
+; AVX2-NEXT: vpinsrw $7, %ecx, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 0)
+ ret <16 x i16> %out
+}
+
+define <16 x i16> @testv16i16u(<16 x i16> %in) {
+; AVX1-LABEL: testv16i16u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm1, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vmovd %xmm0, %ecx
+; AVX1-NEXT: bsfw %cx, %cx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm0, %eax
+; AVX1-NEXT: bsfw %ax, %ax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i16u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm1, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: bsfw %cx, %cx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $4, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $5, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $6, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrw $7, %xmm0, %eax
+; AVX2-NEXT: bsfw %ax, %ax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 -1)
+ ret <16 x i16> %out
+}
+
+define <32 x i8> @testv32i8(<32 x i8> %in) {
+; AVX1-LABEL: testv32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %edx
+; AVX1-NEXT: movl $32, %eax
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: movl $8, %ecx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpextrb $0, %xmm1, %esi
+; AVX1-NEXT: bsfl %esi, %esi
+; AVX1-NEXT: cmovel %eax, %esi
+; AVX1-NEXT: cmpl $32, %esi
+; AVX1-NEXT: cmovel %ecx, %esi
+; AVX1-NEXT: vmovd %esi, %xmm2
+; AVX1-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $2, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $3, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $4, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $5, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $6, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $7, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $8, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $9, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $10, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $11, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $12, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $13, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $14, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm1, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $15, %edx, %xmm2, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpextrb $0, %xmm0, %esi
+; AVX1-NEXT: bsfl %esi, %esi
+; AVX1-NEXT: cmovel %eax, %esi
+; AVX1-NEXT: cmpl $32, %esi
+; AVX1-NEXT: cmovel %ecx, %esi
+; AVX1-NEXT: vmovd %esi, %xmm2
+; AVX1-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $2, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $3, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $4, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $5, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $6, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $7, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $8, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $9, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $10, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $11, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $12, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $13, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $14, %edx, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm0, %edx
+; AVX1-NEXT: bsfl %edx, %edx
+; AVX1-NEXT: cmovel %eax, %edx
+; AVX1-NEXT: cmpl $32, %edx
+; AVX1-NEXT: cmovel %ecx, %edx
+; AVX1-NEXT: vpinsrb $15, %edx, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %edx
+; AVX2-NEXT: movl $32, %eax
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: movl $8, %ecx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpextrb $0, %xmm1, %esi
+; AVX2-NEXT: bsfl %esi, %esi
+; AVX2-NEXT: cmovel %eax, %esi
+; AVX2-NEXT: cmpl $32, %esi
+; AVX2-NEXT: cmovel %ecx, %esi
+; AVX2-NEXT: vmovd %esi, %xmm2
+; AVX2-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $2, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $3, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $4, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $5, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $6, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $7, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $8, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $9, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $10, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $11, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $12, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $13, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $14, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm1, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $15, %edx, %xmm2, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpextrb $0, %xmm0, %esi
+; AVX2-NEXT: bsfl %esi, %esi
+; AVX2-NEXT: cmovel %eax, %esi
+; AVX2-NEXT: cmpl $32, %esi
+; AVX2-NEXT: cmovel %ecx, %esi
+; AVX2-NEXT: vmovd %esi, %xmm2
+; AVX2-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $2, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $3, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $4, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $5, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $6, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $7, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $8, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $9, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $10, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $11, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $12, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $13, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $14, %edx, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm0, %edx
+; AVX2-NEXT: bsfl %edx, %edx
+; AVX2-NEXT: cmovel %eax, %edx
+; AVX2-NEXT: cmpl $32, %edx
+; AVX2-NEXT: cmovel %ecx, %edx
+; AVX2-NEXT: vpinsrb $15, %edx, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 0)
+ ret <32 x i8> %out
+}
+
+define <32 x i8> @testv32i8u(<32 x i8> %in) {
+; AVX1-LABEL: testv32i8u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: bsfl %ecx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: bsfl %eax, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv32i8u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: bsfl %ecx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: bsfl %eax, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 -1)
+ ret <32 x i8> %out
+}
+
+define <4 x i64> @foldv4i64() {
+; AVX-LABEL: foldv4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX-NEXT: retq
+ %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
+ ret <4 x i64> %out
+}
+
+define <4 x i64> @foldv4i64u() {
+; AVX-LABEL: foldv4i64u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX-NEXT: retq
+ %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
+ ret <4 x i64> %out
+}
+
+define <8 x i32> @foldv8i32() {
+; AVX-LABEL: foldv8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX-NEXT: retq
+ %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
+ ret <8 x i32> %out
+}
+
+define <8 x i32> @foldv8i32u() {
+; AVX-LABEL: foldv8i32u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX-NEXT: retq
+ %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
+ ret <8 x i32> %out
+}
+
+define <16 x i16> @foldv16i16() {
+; AVX-LABEL: foldv16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT: retq
+ %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
+ ret <16 x i16> %out
+}
+
+define <16 x i16> @foldv16i16u() {
+; AVX-LABEL: foldv16i16u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT: retq
+ %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
+ ret <16 x i16> %out
+}
+
+define <32 x i8> @foldv32i8() {
+; AVX-LABEL: foldv32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX-NEXT: retq
+ %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
+ ret <32 x i8> %out
+}
+
+define <32 x i8> @foldv32i8u() {
+; AVX-LABEL: foldv32i8u:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX-NEXT: retq
+ %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
+ ret <32 x i8> %out
+}
+
+declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)
+declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)
+declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1)
+declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>, i1)
diff --git a/test/CodeGen/X86/vector-zext.ll b/test/CodeGen/X86/vector-zext.ll
index 42781830ff2fb..c64e17442675e 100644
--- a/test/CodeGen/X86/vector-zext.ll
+++ b/test/CodeGen/X86/vector-zext.ll
@@ -9,7 +9,6 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: # kill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pand .LCPI0_0(%rip), %xmm1
@@ -19,7 +18,6 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: # kill
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pand .LCPI0_0(%rip), %xmm1
@@ -156,7 +154,6 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %z) {
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: # kill
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pand .LCPI3_0(%rip), %xmm1
@@ -166,7 +163,6 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %z) {
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: # kill
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pand .LCPI3_0(%rip), %xmm1
@@ -334,7 +330,6 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: # kill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT: retq
@@ -343,7 +338,6 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: # kill
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSSE3-NEXT: retq
@@ -366,7 +360,6 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
;
; AVX2-LABEL: shuf_zext_8i16_to_8i32:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: # kill
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
entry:
@@ -380,7 +373,6 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: # kill
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT: retq
@@ -389,7 +381,6 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: # kill
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSSE3-NEXT: retq
@@ -413,7 +404,6 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
;
; AVX2-LABEL: shuf_zext_4i32_to_4i64:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: # kill
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/win32-eh-states.ll b/test/CodeGen/X86/win32-eh-states.ll
new file mode 100644
index 0000000000000..8db127df6da73
--- /dev/null
+++ b/test/CodeGen/X86/win32-eh-states.ll
@@ -0,0 +1,112 @@
+; RUN: llc -mtriple=i686-pc-windows-msvc < %s | FileCheck %s
+
+; Based on this source:
+; extern "C" void may_throw(int);
+; void f() {
+; try {
+; may_throw(1);
+; try {
+; may_throw(2);
+; } catch (int) {
+; may_throw(3);
+; }
+; } catch (int) {
+; may_throw(4);
+; }
+; }
+
+%rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
+%eh.CatchHandlerType = type { i32, i8* }
+
+declare void @may_throw(i32)
+declare i32 @__CxxFrameHandler3(...)
+declare void @llvm.eh.begincatch(i8*, i8*)
+declare void @llvm.eh.endcatch()
+declare i32 @llvm.eh.typeid.for(i8*)
+
+$"\01??_R0H@8" = comdat any
+
+@"\01??_7type_info@@6B@" = external constant i8*
+@"\01??_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"\01??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat
+@llvm.eh.handlertype.H.0 = private unnamed_addr constant %eh.CatchHandlerType { i32 0, i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*) }, section "llvm.metadata"
+
+define void @f() #0 {
+entry:
+ invoke void @may_throw(i32 1)
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+ invoke void @may_throw(i32 2)
+ to label %try.cont.9 unwind label %lpad.1
+
+try.cont.9: ; preds = %invoke.cont.3, %invoke.cont, %catch.7
+  ; FIXME: Something about our CFG breaks TailDuplication. This empty asm blocks
+ ; it so we can focus on testing the state numbering.
+ call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+ ret void
+
+lpad: ; preds = %catch, %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
+ catch %eh.CatchHandlerType* @llvm.eh.handlertype.H.0
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = extractvalue { i8*, i32 } %0, 1
+ br label %catch.dispatch.4
+
+lpad.1: ; preds = %invoke.cont
+ %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
+ catch i8* bitcast (%eh.CatchHandlerType* @llvm.eh.handlertype.H.0 to i8*)
+ %4 = extractvalue { i8*, i32 } %3, 0
+ %5 = extractvalue { i8*, i32 } %3, 1
+ %6 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (%eh.CatchHandlerType* @llvm.eh.handlertype.H.0 to i8*)) #3
+ %matches = icmp eq i32 %5, %6
+ br i1 %matches, label %catch, label %catch.dispatch.4
+
+catch.dispatch.4: ; preds = %lpad.1, %lpad
+ %exn.slot.0 = phi i8* [ %4, %lpad.1 ], [ %1, %lpad ]
+ %ehselector.slot.0 = phi i32 [ %5, %lpad.1 ], [ %2, %lpad ]
+ %.pre = tail call i32 @llvm.eh.typeid.for(i8* bitcast (%eh.CatchHandlerType* @llvm.eh.handlertype.H.0 to i8*)) #3
+ %matches6 = icmp eq i32 %ehselector.slot.0, %.pre
+ br i1 %matches6, label %catch.7, label %eh.resume
+
+catch.7: ; preds = %catch.dispatch.4
+ tail call void @llvm.eh.begincatch(i8* %exn.slot.0, i8* null) #3
+ tail call void @may_throw(i32 4)
+ tail call void @llvm.eh.endcatch() #3
+ br label %try.cont.9
+
+catch: ; preds = %lpad.1
+ tail call void @llvm.eh.begincatch(i8* %4, i8* null) #3
+ invoke void @may_throw(i32 3)
+ to label %invoke.cont.3 unwind label %lpad
+
+invoke.cont.3: ; preds = %catch
+ tail call void @llvm.eh.endcatch() #3
+ br label %try.cont.9
+
+eh.resume: ; preds = %catch.dispatch.4
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn.slot.0, 0
+ %lpad.val.12 = insertvalue { i8*, i32 } %lpad.val, i32 %ehselector.slot.0, 1
+ resume { i8*, i32 } %lpad.val.12
+}
+
+; CHECK-LABEL: _f:
+; CHECK: movl $-1, [[state:[-0-9]+]](%ebp)
+; CHECK: movl $___ehhandler$f, {{.*}}
+;
+; CHECK: movl $0, [[state]](%ebp)
+; CHECK: movl $1, (%esp)
+; CHECK: calll _may_throw
+;
+; CHECK: movl $1, [[state]](%ebp)
+; CHECK: movl $2, (%esp)
+; CHECK: calll _may_throw
+
+; CHECK-LABEL: _f.catch:
+; CHECK: movl $4, Lf$frame_escape_{{[0-9]+.*}}
+; CHECK: movl $4, (%esp)
+; CHECK: calll _may_throw
+
+; CHECK-LABEL: _f.catch.1:
+; CHECK: movl $3, Lf$frame_escape_{{[0-9]+.*}}
+; CHECK: movl $3, (%esp)
+; CHECK: calll _may_throw
diff --git a/test/CodeGen/X86/win32-eh.ll b/test/CodeGen/X86/win32-eh.ll
index 4d3c34ed6c175..42c9d9e2240de 100644
--- a/test/CodeGen/X86/win32-eh.ll
+++ b/test/CodeGen/X86/win32-eh.ll
@@ -19,16 +19,18 @@ catchall:
}
; CHECK-LABEL: _use_except_handler3:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl $-1, 12(%esp)
-; CHECK: movl $L__ehtable$use_except_handler3, 8(%esp)
-; CHECK: movl $__except_handler3, 4(%esp)
+; CHECK: movl $-1, -4(%ebp)
+; CHECK: movl $L__ehtable$use_except_handler3, -8(%ebp)
+; CHECK: leal -16(%ebp), %[[node:[^ ,]*]]
+; CHECK: movl $__except_handler3, -12(%ebp)
; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], (%esp)
-; CHECK: leal (%esp), %[[node:[^ ,]*]]
+; CHECK: movl %[[next]], -16(%ebp)
; CHECK: movl %[[node]], %fs:0
; CHECK: calll _may_throw_or_crash
-; CHECK: movl (%esp), %[[next:[^ ,]*]]
+; CHECK: movl -16(%ebp), %[[next:[^ ,]*]]
; CHECK: movl %[[next]], %fs:0
; CHECK: retl
@@ -44,17 +46,21 @@ catchall:
}
; CHECK-LABEL: _use_except_handler4:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %esp, (%esp)
-; CHECK: movl $-2, 20(%esp)
-; CHECK: movl $L__ehtable$use_except_handler4, 4(%esp)
-; CHECK: leal 8(%esp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler4, 12(%esp)
+; CHECK: movl %esp, -24(%ebp)
+; CHECK: movl $-2, -4(%ebp)
+; CHECK: movl $L__ehtable$use_except_handler4, %[[lsda:[^ ,]*]]
+; CHECK: xorl ___security_cookie, %[[lsda]]
+; CHECK: movl %[[lsda]], -8(%ebp)
+; CHECK: leal -16(%ebp), %[[node:[^ ,]*]]
+; CHECK: movl $__except_handler4, -12(%ebp)
; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], 8(%esp)
+; CHECK: movl %[[next]], -16(%ebp)
; CHECK: movl %[[node]], %fs:0
; CHECK: calll _may_throw_or_crash
-; CHECK: movl 8(%esp), %[[next:[^ ,]*]]
+; CHECK: movl -16(%ebp), %[[next:[^ ,]*]]
; CHECK: movl %[[next]], %fs:0
; CHECK: retl
@@ -73,19 +79,34 @@ catchall:
}
; CHECK-LABEL: _use_CxxFrameHandler3:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %esp, (%esp)
-; CHECK: movl $-1, 12(%esp)
-; CHECK: leal 4(%esp), %[[node:[^ ,]*]]
-; CHECK: movl $___ehhandler$use_CxxFrameHandler3, 8(%esp)
+; CHECK: movl %esp, -16(%ebp)
+; CHECK: movl $-1, -4(%ebp)
+; CHECK: leal -12(%ebp), %[[node:[^ ,]*]]
+; CHECK: movl $___ehhandler$use_CxxFrameHandler3, -8(%ebp)
; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], 4(%esp)
+; CHECK: movl %[[next]], -12(%ebp)
; CHECK: movl %[[node]], %fs:0
+; CHECK: movl $0, -4(%ebp)
; CHECK: calll _may_throw_or_crash
-; CHECK: movl 4(%esp), %[[next:[^ ,]*]]
+; CHECK: movl -12(%ebp), %[[next:[^ ,]*]]
; CHECK: movl %[[next]], %fs:0
; CHECK: retl
+; CHECK: .section .xdata,"dr"
+; CHECK-LABEL: L__ehtable$use_CxxFrameHandler3:
+; CHECK-NEXT: .long 429065506
+; CHECK-NEXT: .long 2
+; CHECK-NEXT: .long ($stateUnwindMap$use_CxxFrameHandler3)
+; CHECK-NEXT: .long 1
+; CHECK-NEXT: .long ($tryMap$use_CxxFrameHandler3)
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 1
+
; CHECK-LABEL: ___ehhandler$use_CxxFrameHandler3:
; CHECK: movl $L__ehtable$use_CxxFrameHandler3, %eax
; CHECK: jmp ___CxxFrameHandler3 # TAILCALL
diff --git a/test/DebugInfo/Inputs/dwarfdump.elf-mips64-64-bit-dwarf b/test/DebugInfo/Inputs/dwarfdump.elf-mips64-64-bit-dwarf
new file mode 100755
index 0000000000000..5dbfea52212b2
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump.elf-mips64-64-bit-dwarf
Binary files differ
diff --git a/test/DebugInfo/Inputs/invalid.elf b/test/DebugInfo/Inputs/invalid.elf
new file mode 100644
index 0000000000000..23acad80bc968
--- /dev/null
+++ b/test/DebugInfo/Inputs/invalid.elf
Binary files differ
diff --git a/test/DebugInfo/Inputs/invalid.elf.2 b/test/DebugInfo/Inputs/invalid.elf.2
new file mode 100644
index 0000000000000..16528a55a9313
--- /dev/null
+++ b/test/DebugInfo/Inputs/invalid.elf.2
@@ -0,0 +1 @@
+ELF'L:þB“ELFï \ No newline at end of file
diff --git a/test/DebugInfo/Inputs/invalid.elf.3 b/test/DebugInfo/Inputs/invalid.elf.3
new file mode 100644
index 0000000000000..f49446000044b
--- /dev/null
+++ b/test/DebugInfo/Inputs/invalid.elf.3
@@ -0,0 +1 @@
+ELF‚Lþ'ELFB \ No newline at end of file
diff --git a/test/DebugInfo/Inputs/test-multiple-macho.o b/test/DebugInfo/Inputs/test-multiple-macho.o
new file mode 100644
index 0000000000000..428a1af6826fb
--- /dev/null
+++ b/test/DebugInfo/Inputs/test-multiple-macho.o
Binary files differ
diff --git a/test/DebugInfo/Inputs/test-simple-macho.o b/test/DebugInfo/Inputs/test-simple-macho.o
new file mode 100644
index 0000000000000..8ae4154626c36
--- /dev/null
+++ b/test/DebugInfo/Inputs/test-simple-macho.o
Binary files differ
diff --git a/test/DebugInfo/X86/expressions.ll b/test/DebugInfo/X86/expressions.ll
new file mode 100644
index 0000000000000..52c1b08f8b071
--- /dev/null
+++ b/test/DebugInfo/X86/expressions.ll
@@ -0,0 +1,110 @@
+; REQUIRES: object-emission
+; RUN: llc -mtriple x86_64-apple-darwin14.0.0-elf -filetype=obj %s -o %t
+; RUN: llc -mtriple x86_64-apple-darwin14.0.0-elf -O0 -filetype=obj %s -o %t0
+; RUN: llvm-dwarfdump -debug-dump=loc %t | FileCheck %s
+; RUN: llvm-dwarfdump -debug-dump=loc %t0 | FileCheck -check-prefix CHECK-O0 %s
+
+; CHECK: 0x00000000: Beginning address offset: 0x0000000000000000
+; CHECK: Ending address offset: 0x[[END:[0-9a-f]+]]
+; CHECK: Location description:
+; CHECK-NOT: 75 00 55
+; CHECK-SAME: 55
+; CHECK: 0x00000023: Beginning address offset: 0x0000000000000000
+; CHECK: Ending address offset: 0x{{.*}}[[END]]
+; CHECK: Location description: 75 08 9f
+; CHECK: 0x00000048: Beginning address offset: 0x0000000000000000
+; CHECK: Ending address offset: 0x{{.*}}[[END]]
+; CHECK: Location description: 75 10 9f
+; CHECK: 0x0000006d: Beginning address offset: 0x0000000000000000
+; CHECK: Ending address offset: 0x{{.*}}[[END]]
+; CHECK: Location description: 75 18
+
+
+; CHECK-O0: 0x00000000: Beginning address offset: 0x0000000000000000
+; CHECK-O0: Ending address offset: 0x000000000000001b
+; CHECK-O0: Location description: 55
+; CHECK-O0: Beginning address offset: 0x000000000000001b
+; CHECK-O0: Ending address offset: 0x0000000000000024
+; CHECK-O0: Location description: 54
+; CHECK-O0: Beginning address offset: 0x0000000000000024
+; CHECK-O0: Ending address offset: 0x0000000000000025
+; CHECK-O0: Location description: 77 78 23 00
+; CHECK-O0: 0x0000004c: Beginning address offset: 0x0000000000000000
+; CHECK-O0: Ending address offset: 0x000000000000001b
+; CHECK-O0: Location description: 75 08 9f
+; CHECK-O0: Beginning address offset: 0x000000000000001b
+; CHECK-O0: Ending address offset: 0x0000000000000024
+; CHECK-O0: Location description: 74 08 9f
+; CHECK-O0: Beginning address offset: 0x0000000000000024
+; CHECK-O0: Ending address offset: 0x0000000000000025
+; CHECK-O0: Location description: 77 78 23 08
+; CHECK-O0: 0x0000009c: Beginning address offset: 0x0000000000000000
+; CHECK-O0: Ending address offset: 0x000000000000001b
+; CHECK-O0: Location description: 75 10 9f
+; CHECK-O0: Beginning address offset: 0x000000000000001b
+; CHECK-O0: Ending address offset: 0x0000000000000024
+; CHECK-O0: Location description: 74 10 9f
+; CHECK-O0: Beginning address offset: 0x0000000000000024
+; CHECK-O0: Ending address offset: 0x0000000000000025
+; CHECK-O0: Location description: 77 78 23 08 23 08
+; CHECK-O0: 0x000000ee: Beginning address offset: 0x0000000000000000
+; CHECK-O0: Ending address offset: 0x000000000000001b
+; CHECK-O0: Location description: 75 18
+; CHECK-O0: Beginning address offset: 0x000000000000001b
+; CHECK-O0: Ending address offset: 0x0000000000000024
+; CHECK-O0: Location description: 74 18
+; CHECK-O0: Beginning address offset: 0x0000000000000024
+; CHECK-O0: Ending address offset: 0x0000000000000025
+; CHECK-O0: Location description: 77 78 23 10 23 08 06
+
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #0
+
+define float @foo(float* %args, float *%args2)
+{
+ call void @llvm.dbg.value(metadata float* %args, i64 0, metadata !11, metadata !12), !dbg !19
+ call void @llvm.dbg.value(metadata float* %args, i64 0, metadata !13, metadata !14), !dbg !19
+ call void @llvm.dbg.value(metadata float* %args, i64 0, metadata !15, metadata !16), !dbg !19
+ call void @llvm.dbg.value(metadata float* %args, i64 0, metadata !17, metadata !18), !dbg !19
+ %a = load float, float* %args, !dbg !19
+ %bptr = getelementptr float, float* %args, i32 1, !dbg !19
+ %b = load float, float* %bptr, !dbg !19
+ %cptr = getelementptr float, float* %args, i32 2, !dbg !19
+ %c = load float, float* %cptr, !dbg !19
+ %dptr = getelementptr float, float* %args, i32 3, !dbg !19
+ %d = load float, float* %dptr, !dbg !19
+ %ret1 = fadd float %a, %b, !dbg !19
+ %ret2 = fadd float %c, %d, !dbg !19
+ call void @llvm.dbg.value(metadata float* %args2, i64 0, metadata !11, metadata !12), !dbg !19
+ call void @llvm.dbg.value(metadata float* %args2, i64 0, metadata !13, metadata !14), !dbg !19
+ call void @llvm.dbg.value(metadata float* %args2, i64 0, metadata !15, metadata !16), !dbg !19
+ call void @llvm.dbg.value(metadata float* %args2, i64 0, metadata !17, metadata !18), !dbg !19
+ %ret = fsub float %ret1, %ret2, !dbg !19
+ ret float %ret, !dbg !19
+}
+
+attributes #0 = { nounwind readnone }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.dbg.cu = !{!2}
+
+!0 = !{i32 2, !"Dwarf Version", i32 4}
+!1 = !{i32 1, !"Debug Info Version", i32 3}
+
+!2 = !DICompileUnit(language: DW_LANG_C89, file: !3, producer: "byHand", isOptimized: true, runtimeVersion: 0, emissionKind: 1, enums: !4, retainedTypes: !4, subprograms: !5, globals: !4, imports: !4)
+!3 = !DIFile(filename: "expressions", directory: ".")
+!4 = !{}
+!5 = !{!6}
+!6 = !DISubprogram(name: "foo", linkageName: "foo", scope: null, file: !3, type: !7, isLocal: false, isDefinition: true, isOptimized: true, function: float (float*, float*)* @foo, variables: !4)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!10, !10}
+!9 = !DIBasicType(name: "float", size: 4, align: 4, encoding: DW_ATE_float)
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !9, size: 64, align: 64)
+!11 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "a", arg: 1, scope: !6, file: !3, line: 1, type: !10)
+!12 = !DIExpression(DW_OP_plus, 0)
+!13 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "b", arg: 2, scope: !6, file: !3, line: 1, type: !10)
+!14 = !DIExpression(DW_OP_plus, 8)
+!15 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "c", arg: 3, scope: !6, file: !3, line: 1, type: !10)
+!16 = !DIExpression(DW_OP_plus, 8, DW_OP_plus, 8)
+!17 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "d", arg: 4, scope: !6, file: !3, line: 1, type: !9)
+!18 = !DIExpression(DW_OP_plus, 16, DW_OP_plus, 8, DW_OP_deref)
+!19 = !DILocation(line: 1, scope: !6)
diff --git a/test/DebugInfo/X86/inlined-indirect-value.ll b/test/DebugInfo/X86/inlined-indirect-value.ll
new file mode 100644
index 0000000000000..7f95691fbb8fd
--- /dev/null
+++ b/test/DebugInfo/X86/inlined-indirect-value.ll
@@ -0,0 +1,81 @@
+; RUN: llc -filetype=asm -asm-verbose=0 < %s | FileCheck %s
+
+; The "1" from line 09 in the snippet below shouldn't be marked with the
+; location of the "1" from line 04. Instead it will have a location inside
+; main() (the real location is just erased, so it won't be perfectly accurate).
+
+; options: -g -O3
+; 01 volatile int x;
+; 02 int y;
+; 03 static __attribute__((always_inline)) int f1() {
+; 04 if (x * 3 < 14) return 1;
+; 05 return 2;
+; 06 }
+; 07 int main() {
+; 08 x = f1();
+; 09 x = x ? 1 : 2;
+; 10 }
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@x = common global i32 0, align 4
+@y = common global i32 0, align 4
+
+define i32 @main() {
+; CHECK: .loc 1 {{[89]}}
+; CHECK-NOT: .loc
+; CHECK: movl $1
+
+entry:
+ %0 = load volatile i32, i32* @x, align 4, !dbg !16, !tbaa !19
+ %mul.i = mul nsw i32 %0, 3, !dbg !23
+ %cmp.i = icmp slt i32 %mul.i, 14, !dbg !24
+ %..i = select i1 %cmp.i, i32 1, i32 2, !dbg !25
+ store volatile i32 %..i, i32* @x, align 4, !dbg !27, !tbaa !19
+ %1 = load volatile i32, i32* @x, align 4, !dbg !28, !tbaa !19
+ %tobool = icmp ne i32 %1, 0, !dbg !28
+ br i1 %tobool, label %select.end, label %select.mid
+
+select.mid: ; preds = %entry
+ br label %select.end
+
+select.end: ; preds = %entry, %select.mid
+ %cond = phi i32 [ 1, %entry ], [ 2, %select.mid ]
+ store volatile i32 %cond, i32* @x, align 4, !dbg !29, !tbaa !19
+ ret i32 0, !dbg !30
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!13, !14}
+
+!0 = !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: true, runtimeVersion: 0, emissionKind: 1, enums: !2, retainedTypes: !2, subprograms: !3, globals: !9, imports: !2)
+!1 = !DIFile(filename: "inline-break.c", directory: "/build/dir")
+!2 = !{}
+!3 = !{!4, !8}
+!4 = !DISubprogram(name: "main", scope: !1, file: !1, line: 7, type: !5, isLocal: false, isDefinition: true, scopeLine: 7, isOptimized: true, function: i32 ()* @main, variables: !2)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7}
+!7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!8 = !DISubprogram(name: "f1", scope: !1, file: !1, line: 3, type: !5, isLocal: true, isDefinition: true, scopeLine: 3, isOptimized: true, variables: !2)
+!9 = !{!10, !12}
+!10 = !DIGlobalVariable(name: "x", scope: !0, file: !1, line: 1, type: !11, isLocal: false, isDefinition: true, variable: i32* @x)
+!11 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !7)
+!12 = !DIGlobalVariable(name: "y", scope: !0, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, variable: i32* @y)
+!13 = !{i32 2, !"Dwarf Version", i32 4}
+!14 = !{i32 2, !"Debug Info Version", i32 3}
+!16 = !DILocation(line: 4, column: 9, scope: !17, inlinedAt: !18)
+!17 = distinct !DILexicalBlock(scope: !8, file: !1, line: 4, column: 9)
+!18 = distinct !DILocation(line: 8, column: 9, scope: !4)
+!19 = !{!20, !20, i64 0}
+!20 = !{!"int", !21, i64 0}
+!21 = !{!"omnipotent char", !22, i64 0}
+!22 = !{!"Simple C/C++ TBAA"}
+!23 = !DILocation(line: 4, column: 11, scope: !17, inlinedAt: !18)
+!24 = !DILocation(line: 4, column: 15, scope: !17, inlinedAt: !18)
+!25 = !DILocation(line: 4, column: 21, scope: !26, inlinedAt: !18)
+!26 = !DILexicalBlockFile(scope: !17, file: !1, discriminator: 1)
+!27 = !DILocation(line: 8, column: 7, scope: !4)
+!28 = !DILocation(line: 9, column: 9, scope: !4)
+!29 = !DILocation(line: 9, column: 7, scope: !4)
+!30 = !DILocation(line: 10, column: 1, scope: !4)
diff --git a/test/DebugInfo/debuglineinfo-macho.test b/test/DebugInfo/debuglineinfo-macho.test
new file mode 100644
index 0000000000000..0c63dd6e8b0db
--- /dev/null
+++ b/test/DebugInfo/debuglineinfo-macho.test
@@ -0,0 +1,43 @@
+# Check that relocations get applied
+RUN: llvm-dwarfdump %p/Inputs/test-simple-macho.o | FileCheck %s
+RUN: llvm-dwarfdump %p/Inputs/test-multiple-macho.o | FileCheck %s
+RUN: llvm-rtdyld -printline %p/Inputs/test-multiple-macho.o | FileCheck %s
+RUN: llvm-rtdyld -printobjline %p/Inputs/test-multiple-macho.o | FileCheck %s
+
+CHECK-NOT: error: failed to compute relocation: X86_64_RELOC_UNSIGNED
+
+# Check that relocations get applied correctly
+RUN: llvm-rtdyld -printobjline %p/Inputs/test-simple-macho.o \
+RUN: | FileCheck %s -check-prefix TEST_SIMPLE
+RUN: llvm-rtdyld -printline %p/Inputs/test-simple-macho.o \
+RUN: | FileCheck %s -check-prefix TEST_SIMPLE
+RUN: llvm-rtdyld -printobjline %p/Inputs/test-multiple-macho.o \
+RUN: | FileCheck %s -check-prefix TEST_MULTIPLE
+RUN: llvm-rtdyld -printline %p/Inputs/test-multiple-macho.o \
+RUN: | FileCheck %s -check-prefix TEST_MULTIPLE
+
+TEST_SIMPLE: Function: _foo, Size = 11
+TEST_SIMPLE-NEXT: Line info @ 0: simple.c, line:1
+TEST_SIMPLE-NEXT: Line info @ 7: simple.c, line:2
+TEST_SIMPLE-NOT: Line info @ 11: simple.c, line:2
+
+TEST_MULTIPLE: Function: _bar, Size = 48
+TEST_MULTIPLE-NEXT: Line info @ 0: multiple.c, line:5
+TEST_MULTIPLE-NEXT: Line info @ 7: multiple.c, line:6
+TEST_MULTIPLE-NEXT: Line info @ 16: multiple.c, line:9
+TEST_MULTIPLE-NEXT: Line info @ 21: multiple.c, line:9
+TEST_MULTIPLE-NEXT: Line info @ 26: multiple.c, line:7
+TEST_MULTIPLE-NEXT: Line info @ 33: multiple.c, line:10
+TEST_MULTIPLE-NOT: Line info @ 48: multiple.c, line:12
+TEST_MULTIPLE-NEXT: Function: _foo, Size = 16
+TEST_MULTIPLE-NEXT: Line info @ 0: multiple.c, line:1
+TEST_MULTIPLE-NEXT: Line info @ 7: multiple.c, line:2
+TEST_MULTIPLE-NOT: Line info @ 16: multiple.c, line:5
+TEST_MULTIPLE-NEXT: Function: _fubar, Size = 46
+TEST_MULTIPLE-NEXT: Line info @ 0: multiple.c, line:12
+TEST_MULTIPLE-NEXT: Line info @ 7: multiple.c, line:13
+TEST_MULTIPLE-NEXT: Line info @ 12: multiple.c, line:17
+TEST_MULTIPLE-NEXT: Line info @ 25: multiple.c, line:15
+TEST_MULTIPLE-NEXT: Line info @ 34: multiple.c, line:19
+TEST_MULTIPLE-NEXT: Line info @ 41: multiple.c, line:21
+TEST_MULTIPLE-NOT: Line info @ 46: multiple.c, line:21
diff --git a/test/DebugInfo/debuglineinfo.test b/test/DebugInfo/debuglineinfo.test
index 96a32285be46b..720ba12ed7614 100644
--- a/test/DebugInfo/debuglineinfo.test
+++ b/test/DebugInfo/debuglineinfo.test
@@ -18,7 +18,6 @@ TEST_INLINE-NEXT: Line info @ 165: test-inline.cpp, line:35
TEST_INLINE-NEXT: Function: _Z3foov, Size = 3
TEST_INLINE-NEXT: Line info @ 0: test-inline.cpp, line:28
TEST_INLINE-NEXT: Line info @ 2: test-inline.cpp, line:29
-TEST_INLINE-NEXT: Line info @ 3: test-inline.cpp, line:29
TEST_INLINE-NEXT: Function: main, Size = 146
TEST_INLINE-NEXT: Line info @ 0: test-inline.cpp, line:39
TEST_INLINE-NEXT: Line info @ 21: test-inline.cpp, line:41
@@ -29,7 +28,6 @@ TEST_INLINE-NEXT: Line info @ 90: test-inline.cpp, line:45
TEST_INLINE-NEXT: Line info @ 95: test-inline.cpp, line:46
TEST_INLINE-NEXT: Line info @ 114: test-inline.cpp, line:48
TEST_INLINE-NEXT: Line info @ 141: test-inline.cpp, line:49
-TEST_INLINE-NEXT: Line info @ 146: test-inline.cpp, line:49
; This test checks the case where all code is in a single section.
TEST_PARAMETERS: Function: _Z15test_parametersPfPA2_dR11char_structPPitm, Size = 170
@@ -49,5 +47,4 @@ TEST_PARAMETERS-NEXT: Line info @ 90: test-parameters.cpp, line:45
TEST_PARAMETERS-NEXT: Line info @ 95: test-parameters.cpp, line:46
TEST_PARAMETERS-NEXT: Line info @ 114: test-parameters.cpp, line:48
TEST_PARAMETERS-NEXT: Line info @ 141: test-parameters.cpp, line:49
-TEST_PARAMETERS-NEXT: Line info @ 146: test-parameters.cpp, line:49
diff --git a/test/DebugInfo/dwarfdump-64-bit-dwarf.test b/test/DebugInfo/dwarfdump-64-bit-dwarf.test
new file mode 100644
index 0000000000000..0a24414c27bba
--- /dev/null
+++ b/test/DebugInfo/dwarfdump-64-bit-dwarf.test
@@ -0,0 +1,15 @@
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump.elf-mips64-64-bit-dwarf \
+RUN: --debug-dump=line | FileCheck %s
+
+# FIXME: llvm-dwarfdump's support for 64-bit dwarf is currently limited to
+# .debug_line.
+
+CHECK: total_length: 0x00000212
+CHECK: version: 2
+CHECK: prologue_length: 0x000001ab
+CHECK: min_inst_length: 1
+CHECK: default_is_stmt: 1
+CHECK: line_base: -5
+CHECK: line_range: 14
+CHECK: opcode_base: 13
+CHECK: is_stmt end_sequence
diff --git a/test/DebugInfo/dwarfdump-invalid.test b/test/DebugInfo/dwarfdump-invalid.test
new file mode 100644
index 0000000000000..da5b23e30ceb9
--- /dev/null
+++ b/test/DebugInfo/dwarfdump-invalid.test
@@ -0,0 +1,6 @@
+; Verify that llvm-dwarfdump doesn't crash on broken input files.
+
+RUN: llvm-dwarfdump %p/Inputs/invalid.elf 2>&1 | FileCheck %s --check-prefix=INVALID-ELF
+RUN: llvm-dwarfdump %p/Inputs/invalid.elf.2 2>&1 | FileCheck %s --check-prefix=INVALID-ELF
+RUN: llvm-dwarfdump %p/Inputs/invalid.elf.3 2>&1 | FileCheck %s --check-prefix=INVALID-ELF
+INVALID-ELF: Invalid data was encountered while parsing the file
diff --git a/test/ExecutionEngine/MCJIT/cross-module-sm-pic-a.ll b/test/ExecutionEngine/MCJIT/cross-module-sm-pic-a.ll
index eb414249bdc36..04331990db338 100644
--- a/test/ExecutionEngine/MCJIT/cross-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/MCJIT/cross-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -extra-module=%p/Inputs/cross-module-b.ll -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386
+; XFAIL: mips-, mipsel-, i686, i386
declare i32 @FB()
diff --git a/test/ExecutionEngine/MCJIT/eh-lg-pic.ll b/test/ExecutionEngine/MCJIT/eh-lg-pic.ll
index 9277ec4a8cae3..222196f81c4b2 100644
--- a/test/ExecutionEngine/MCJIT/eh-lg-pic.ll
+++ b/test/ExecutionEngine/MCJIT/eh-lg-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -relocation-model=pic -code-model=large %s
-; XFAIL: cygwin, win32, mingw, mips, i686, i386, aarch64, arm, asan, msan
+; XFAIL: cygwin, win32, mingw, mips-, mipsel-, i686, i386, aarch64, arm, asan, msan
declare i8* @__cxa_allocate_exception(i64)
declare void @__cxa_throw(i8*, i8*, i8*)
declare i32 @__gxx_personality_v0(...)
diff --git a/test/ExecutionEngine/MCJIT/eh-sm-pic.ll b/test/ExecutionEngine/MCJIT/eh-sm-pic.ll
index 37fb628cf0a2c..c73dcca5ee1c7 100644
--- a/test/ExecutionEngine/MCJIT/eh-sm-pic.ll
+++ b/test/ExecutionEngine/MCJIT/eh-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -relocation-model=pic -code-model=small %s
-; XFAIL: cygwin, win32, mingw, mips, i686, i386, darwin, aarch64, arm, asan, msan
+; XFAIL: cygwin, win32, mingw, mips-, mipsel-, i686, i386, darwin, aarch64, arm, asan, msan
declare i8* @__cxa_allocate_exception(i64)
declare void @__cxa_throw(i8*, i8*, i8*)
declare i32 @__gxx_personality_v0(...)
diff --git a/test/ExecutionEngine/MCJIT/multi-module-sm-pic-a.ll b/test/ExecutionEngine/MCJIT/multi-module-sm-pic-a.ll
index 01faecc6495b7..7e5710dbf9c98 100644
--- a/test/ExecutionEngine/MCJIT/multi-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/MCJIT/multi-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386
+; XFAIL: mips-, mipsel-, i686, i386
declare i32 @FB()
diff --git a/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll b/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll
index d47fc6cffbb24..415fd25409d66 100644
--- a/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -extra-module=%p/Inputs/cross-module-b.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386, arm
+; XFAIL: mips-, mipsel-, i686, i386, arm
declare i32 @FB()
diff --git a/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll b/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll
index d248c4b2a94ab..3e020dc853a6f 100644
--- a/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386, arm
+; XFAIL: mips-, mipsel-, i686, i386, arm
declare i32 @FB()
diff --git a/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll b/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll
index f9184b4077bbc..e350b85a8bce7 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -remote-mcjit -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@count = global i32 1, align 4
diff --git a/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll b/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll
index d9a4faa8e6bd4..7162e927de0ab 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -remote-mcjit -O0 -relocation-model=pic -code-model=small %s
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@.str = private unnamed_addr constant [6 x i8] c"data1\00", align 1
@ptr = global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), align 4
diff --git a/test/ExecutionEngine/MCJIT/stubs-sm-pic.ll b/test/ExecutionEngine/MCJIT/stubs-sm-pic.ll
index 6eebe44e3ff35..46545ce534237 100644
--- a/test/ExecutionEngine/MCJIT/stubs-sm-pic.ll
+++ b/test/ExecutionEngine/MCJIT/stubs-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -disable-lazy-compilation=false -relocation-model=pic -code-model=small %s
-; XFAIL: mips, i686, i386, aarch64, arm
+; XFAIL: mips-, mipsel-, i686, i386, aarch64, arm
define i32 @main() nounwind {
entry:
diff --git a/test/ExecutionEngine/MCJIT/test-global-init-nonzero-sm-pic.ll b/test/ExecutionEngine/MCJIT/test-global-init-nonzero-sm-pic.ll
index 8ae496d0fab06..e54135fc7cbc4 100644
--- a/test/ExecutionEngine/MCJIT/test-global-init-nonzero-sm-pic.ll
+++ b/test/ExecutionEngine/MCJIT/test-global-init-nonzero-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@count = global i32 1, align 4
diff --git a/test/ExecutionEngine/MCJIT/test-ptr-reloc-sm-pic.ll b/test/ExecutionEngine/MCJIT/test-ptr-reloc-sm-pic.ll
index d50ba9dd5a0a2..eea6fde2e2caf 100644
--- a/test/ExecutionEngine/MCJIT/test-ptr-reloc-sm-pic.ll
+++ b/test/ExecutionEngine/MCJIT/test-ptr-reloc-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -O0 -relocation-model=pic -code-model=small %s
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@.str = private unnamed_addr constant [6 x i8] c"data1\00", align 1
@ptr = global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), align 4
diff --git a/test/ExecutionEngine/OrcMCJIT/cross-module-sm-pic-a.ll b/test/ExecutionEngine/OrcMCJIT/cross-module-sm-pic-a.ll
index 4f5afd0ed4752..9af82e7486fb5 100644
--- a/test/ExecutionEngine/OrcMCJIT/cross-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/OrcMCJIT/cross-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -extra-module=%p/Inputs/cross-module-b.ll -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386
+; XFAIL: mips-, mipsel-, i686, i386
declare i32 @FB()
diff --git a/test/ExecutionEngine/OrcMCJIT/eh-lg-pic.ll b/test/ExecutionEngine/OrcMCJIT/eh-lg-pic.ll
index 32b53c4847bc6..47674dd9cdc33 100644
--- a/test/ExecutionEngine/OrcMCJIT/eh-lg-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/eh-lg-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -relocation-model=pic -code-model=large %s
-; XFAIL: cygwin, win32, mingw, mips, i686, i386, aarch64, arm, asan, msan
+; XFAIL: cygwin, win32, mingw, mips-, mipsel-, i686, i386, aarch64, arm, asan, msan
declare i8* @__cxa_allocate_exception(i64)
declare void @__cxa_throw(i8*, i8*, i8*)
declare i32 @__gxx_personality_v0(...)
diff --git a/test/ExecutionEngine/OrcMCJIT/eh-sm-pic.ll b/test/ExecutionEngine/OrcMCJIT/eh-sm-pic.ll
index a54795ead56dc..c279720cdb034 100644
--- a/test/ExecutionEngine/OrcMCJIT/eh-sm-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/eh-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -relocation-model=pic -code-model=small %s
-; XFAIL: cygwin, win32, mingw, mips, i686, i386, darwin, aarch64, arm, asan, msan
+; XFAIL: cygwin, win32, mingw, mips-, mipsel-, i686, i386, darwin, aarch64, arm, asan, msan
declare i8* @__cxa_allocate_exception(i64)
declare void @__cxa_throw(i8*, i8*, i8*)
declare i32 @__gxx_personality_v0(...)
diff --git a/test/ExecutionEngine/OrcMCJIT/multi-module-sm-pic-a.ll b/test/ExecutionEngine/OrcMCJIT/multi-module-sm-pic-a.ll
index 80b8e163d5d68..0052c01265296 100644
--- a/test/ExecutionEngine/OrcMCJIT/multi-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/OrcMCJIT/multi-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386
+; XFAIL: mips-, mipsel-, i686, i386
declare i32 @FB()
diff --git a/test/ExecutionEngine/OrcMCJIT/remote/cross-module-sm-pic-a.ll b/test/ExecutionEngine/OrcMCJIT/remote/cross-module-sm-pic-a.ll
index c8979043857e4..4326fc1e526b5 100644
--- a/test/ExecutionEngine/OrcMCJIT/remote/cross-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/OrcMCJIT/remote/cross-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -extra-module=%p/Inputs/cross-module-b.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386, arm
+; XFAIL: mips-, mipsel-, i686, i386, arm
declare i32 @FB()
diff --git a/test/ExecutionEngine/OrcMCJIT/remote/multi-module-sm-pic-a.ll b/test/ExecutionEngine/OrcMCJIT/remote/multi-module-sm-pic-a.ll
index 41812d25169c4..18a2d7e87d098 100644
--- a/test/ExecutionEngine/OrcMCJIT/remote/multi-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/OrcMCJIT/remote/multi-module-sm-pic-a.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, i686, i386, arm
+; XFAIL: mips-, mipsel-, i686, i386, arm
declare i32 @FB()
diff --git a/test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-sm-pic.ll b/test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-sm-pic.ll
index ec9e1ae1a03c8..f1e93133b226a 100644
--- a/test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-sm-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/remote/test-global-init-nonzero-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -remote-mcjit -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@count = global i32 1, align 4
diff --git a/test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-sm-pic.ll b/test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-sm-pic.ll
index 61321ac1b9475..cac800ef5e767 100644
--- a/test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-sm-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/remote/test-ptr-reloc-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -remote-mcjit -O0 -relocation-model=pic -code-model=small %s
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@.str = private unnamed_addr constant [6 x i8] c"data1\00", align 1
@ptr = global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), align 4
diff --git a/test/ExecutionEngine/OrcMCJIT/stubs-sm-pic.ll b/test/ExecutionEngine/OrcMCJIT/stubs-sm-pic.ll
index f354a0de09359..ec4183bc724d2 100644
--- a/test/ExecutionEngine/OrcMCJIT/stubs-sm-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/stubs-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -disable-lazy-compilation=false -relocation-model=pic -code-model=small %s
-; XFAIL: mips, i686, i386, aarch64, arm
+; XFAIL: mips-, mipsel-, i686, i386, aarch64, arm
define i32 @main() nounwind {
entry:
diff --git a/test/ExecutionEngine/OrcMCJIT/test-global-init-nonzero-sm-pic.ll b/test/ExecutionEngine/OrcMCJIT/test-global-init-nonzero-sm-pic.ll
index 7f66aec77078b..c9b6cf755b7cf 100644
--- a/test/ExecutionEngine/OrcMCJIT/test-global-init-nonzero-sm-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/test-global-init-nonzero-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -relocation-model=pic -code-model=small %s > /dev/null
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@count = global i32 1, align 4
diff --git a/test/ExecutionEngine/OrcMCJIT/test-ptr-reloc-sm-pic.ll b/test/ExecutionEngine/OrcMCJIT/test-ptr-reloc-sm-pic.ll
index 6e0a425623b7f..d0bb070110c10 100644
--- a/test/ExecutionEngine/OrcMCJIT/test-ptr-reloc-sm-pic.ll
+++ b/test/ExecutionEngine/OrcMCJIT/test-ptr-reloc-sm-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli -jit-kind=orc-mcjit -O0 -relocation-model=pic -code-model=small %s
-; XFAIL: mips, aarch64, arm, i686, i386
+; XFAIL: mips-, mipsel-, aarch64, arm, i686, i386
@.str = private unnamed_addr constant [6 x i8] c"data1\00", align 1
@ptr = global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), align 4
diff --git a/test/ExecutionEngine/RuntimeDyld/Mips/ELF_Mips64r2N64_PIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/Mips/ELF_Mips64r2N64_PIC_relocations.s
new file mode 100644
index 0000000000000..1d8d293a26a4f
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/Mips/ELF_Mips64r2N64_PIC_relocations.s
@@ -0,0 +1,164 @@
+# RUN: llvm-mc -triple=mips64el-unknown-linux -relocation-model=pic -code-model=small -filetype=obj -o %T/test_ELF_Mips64N64.o %s
+# RUN: llc -mtriple=mips64el-unknown-linux -relocation-model=pic -filetype=obj -o %T/test_ELF_ExternalFunction_Mips64N64.o %S/Inputs/ExternalFunction.ll
+# RUN: llvm-rtdyld -triple=mips64el-unknown-linux -verify -map-section test_ELF_Mips64N64.o,.text=0x1000 -map-section test_ELF_ExternalFunction_Mips64N64.o,.text=0x10000 -check=%s %/T/test_ELF_Mips64N64.o %T/test_ELF_ExternalFunction_Mips64N64.o
+
+ .data
+# Test R_MIPS_PC32 relocation.
+# rtdyld-check: *{4}(R_MIPS_PC32) = (foo - R_MIPS_PC32)[31:0]
+R_MIPS_PC32:
+ .word foo-.
+# rtdyld-check: *{4}(R_MIPS_PC32 + 4) = (foo - tmp1)[31:0]
+tmp1:
+ .4byte foo-tmp1
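+# Both words above encode a PC-relative difference against the external symbol
+# foo, so each assembles to an R_MIPS_PC32 relocation that RuntimeDyld resolves.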
+
+ .text
+ .abicalls
+ .section .mdebug.abi64,"",@progbits
+ .nan legacy
+ .file "ELF_Mips64N64_PIC_relocations.ll"
+ .text
+ .globl bar
+ .align 3
+ .type bar,@function
+ .set nomicromips
+ .set nomips16
+ .ent bar
+bar:
+ .frame $fp,40,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+ daddiu $sp, $sp, -40
+ sd $ra, 32($sp)
+ sd $fp, 24($sp)
+ move $fp, $sp
+ sd $4, 16($fp)
+ lb $2, 0($4)
+ sd $4, 8($fp)
+
+# Test R_MIPS_26 relocation.
+# rtdyld-check: decode_operand(insn1, 0)[25:0] = foo
+insn1:
+ jal foo
+ nop
+
+# Test R_MIPS_PC16 relocation.
+# rtdyld-check: decode_operand(insn2, 1)[15:0] = foo - insn2
+insn2:
+ bal foo
+ nop
+
+ move $sp, $fp
+ ld $ra, 32($sp)
+ ld $fp, 24($sp)
+ daddiu $sp, $sp, 32
+ jr $ra
+ nop
+ .set at
+ .set macro
+ .set reorder
+ .end bar
+$func_end0:
+ .size bar, ($func_end0)-bar
+
+ .globl main
+ .align 3
+ .type main,@function
+ .set nomicromips
+ .set nomips16
+ .ent main
+main:
+ .frame $fp,32,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+ daddiu $sp, $sp, -32
+ sd $ra, 24($sp)
+ sd $fp, 16($sp)
+ sd $gp, 8($sp)
+ move $fp, $sp
+
+# Check the upper 16 bits of the offset between the address of the main
+# function and the global offset table.
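+# (The extra 0x8000 in the expression mirrors %hi rounding: %lo is
+# sign-extended when added back, so %hi must round up when %lo is negative.)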
+# rtdyld-check: decode_operand(insn3, 1)[15:0] = ((section_addr(test_ELF_Mips64N64.o, .got) + 0x7ff0) - main + 0x8000)[31:16]
+insn3:
+ lui $1, %hi(%neg(%gp_rel(main)))
+ daddu $1, $1, $25
+
+# Check the lower 16 bits of the offset between the address of the main
+# function and the global offset table.
+# rtdyld-check: decode_operand(insn4, 2)[15:0] = ((section_addr(test_ELF_Mips64N64.o, .got) + 0x7ff0) - main)[15:0]
+insn4:
+ daddiu $1, $1, %lo(%neg(%gp_rel(main)))
+ sw $zero, 4($fp)
+
+# The $gp register contains the address of the .got section + 0x7FF0, where
+# 0x7FF0 is the offset of $gp from the beginning of the .got section. Check
+# that we load the address of the page pointer from the correct offset. In
+# this case the page pointer is the first entry in the .got section, so the
+# offset is 0 - 0x7FF0.
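+# (-0x7FF0 wraps to 0x8010 when encoded as an unsigned 16-bit immediate.)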
+# rtdyld-check: decode_operand(insn5, 2)[15:0] = 0x8010
+#
+# Check that the global offset table contains the page pointer.
+# rtdyld-check: *{8}(section_addr(test_ELF_Mips64N64.o, .got)) = (_str + 0x8000) & 0xffffffffffff0000
+insn5:
+ ld $25, %got_page(_str)($1)
+
+# Check the offset of _str from the page pointer.
+# rtdyld-check: decode_operand(insn6, 2)[15:0] = _str[15:0]
+insn6:
+ daddiu $25, $25, %got_ofst(_str)
+
+# Check that we load the address of var from the correct offset. In this case
+# var is the second entry in the .got section, so the offset is 8 - 0x7FF0.
+# rtdyld-check: decode_operand(insn7, 2)[15:0] = 0x8018
+#
+# Check that the global offset table contains the address of var.
+# rtdyld-check: *{8}(section_addr(test_ELF_Mips64N64.o, .got) + 8) = var
+insn7:
+ ld $2, %got_disp(var)($1)
+ sd $25, 0($2)
+
+# Check that we load the address of bar from the correct offset. In this case
+# bar is the third entry in the .got section, so the offset is 16 - 0x7FF0.
+# rtdyld-check: decode_operand(insn8, 2)[15:0] = 0x8020
+#
+# Check that the global offset table contains the address of bar.
+# rtdyld-check: *{8}(section_addr(test_ELF_Mips64N64.o, .got) + 16) = bar
+insn8:
+ ld $2, %call16(bar)($1)
+
+ move $4, $25
+ move $gp, $1
+ move $25, $2
+ jalr $25
+ nop
+ move $sp, $fp
+ ld $gp, 8($sp)
+ ld $fp, 16($sp)
+ ld $ra, 24($sp)
+ daddiu $sp, $sp, 32
+ jr $ra
+ nop
+ .set at
+ .set macro
+ .set reorder
+ .end main
+$func_end1:
+ .size main, ($func_end1)-main
+
+ .type _str,@object
+ .section .rodata.str1.1,"aMS",@progbits,1
+_str:
+ .asciz "test"
+ .size _str, 5
+
+ .type var,@object
+ .comm var,8,8
+
+ .section ".note.GNU-stack","",@progbits
+ .text
diff --git a/test/ExecutionEngine/RuntimeDyld/Mips/ELF_O32_PIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/Mips/ELF_O32_PIC_relocations.s
new file mode 100644
index 0000000000000..a4b145ab51720
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/Mips/ELF_O32_PIC_relocations.s
@@ -0,0 +1,50 @@
+# RUN: llvm-mc -triple=mipsel-unknown-linux -relocation-model=pic -code-model=small -filetype=obj -o %T/test_ELF_O32.o %s
+# RUN: llc -mtriple=mipsel-unknown-linux -relocation-model=pic -filetype=obj -o %T/test_ELF_ExternalFunction_O32.o %S/Inputs/ExternalFunction.ll
+# RUN: llvm-rtdyld -triple=mipsel-unknown-linux -verify -map-section test_ELF_O32.o,.text=0x1000 -map-section test_ELF_ExternalFunction_O32.o,.text=0x10000 -check=%s %T/test_ELF_O32.o %T/test_ELF_ExternalFunction_O32.o
+
+# RUN: llvm-mc -triple=mips-unknown-linux -relocation-model=pic -code-model=small -filetype=obj -o %T/test_ELF_O32.o %s
+# RUN: llc -mtriple=mips-unknown-linux -relocation-model=pic -filetype=obj -o %/T/test_ELF_ExternalFunction_O32.o %S/Inputs/ExternalFunction.ll
+# RUN: llvm-rtdyld -triple=mips-unknown-linux -verify -map-section test_ELF_O32.o,.text=0x1000 -map-section test_ELF_ExternalFunction_O32.o,.text=0x10000 -check=%s %T/test_ELF_O32.o %T/test_ELF_ExternalFunction_O32.o
+
+ .data
+# rtdyld-check: *{4}R_MIPS_32 = foo[31:0]
+R_MIPS_32:
+ .word foo
+# rtdyld-check: *{4}(R_MIPS_32+4) = foo[31:0]
+ .4byte foo
+# rtdyld-check: *{4}(R_MIPS_PC32) = (foo - R_MIPS_PC32)[31:0]
+R_MIPS_PC32:
+ .word foo-.
+# rtdyld-check: *{4}(R_MIPS_PC32 + 4) = (foo - tmp1)[31:0]
+tmp1:
+ .4byte foo-tmp1
+
+ .text
+ .abicalls
+ .nan legacy
+ .text
+ .set nomicromips
+ .set nomips16
+ .set noreorder
+ .set nomacro
+ .set noat
+
+ .align 3
+ .globl bar
+ .type bar,@function
+bar:
+# rtdyld-check: decode_operand(R_MIPS_26, 0)[27:0] = stub_addr(test_ELF_O32.o, .text, foo)[27:0]
+# rtdyld-check: decode_operand(R_MIPS_26, 0)[1:0] = 0
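+# foo is defined in a separate object file, so the jump is redirected through
+# a stub that RuntimeDyld emits into this object's .text section.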
+R_MIPS_26:
+ j foo
+ nop
+
+# rtdyld-check: decode_operand(R_MIPS_HI16, 1)[15:0] = foo[31:16]
+R_MIPS_HI16:
+ lui $1, %hi(foo)
+
+# rtdyld-check: decode_operand(R_MIPS_LO16, 1)[15:0] = foo[15:0]
+R_MIPS_LO16:
+ lui $1, %lo(foo)
+
+ .size bar, .-bar
diff --git a/test/ExecutionEngine/RuntimeDyld/Mips/Inputs/ExternalFunction.ll b/test/ExecutionEngine/RuntimeDyld/Mips/Inputs/ExternalFunction.ll
new file mode 100644
index 0000000000000..a59d68c1e1869
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/Mips/Inputs/ExternalFunction.ll
@@ -0,0 +1,4 @@
+define void @foo() {
+entry:
+ ret void
+}
diff --git a/test/ExecutionEngine/RuntimeDyld/Mips/lit.local.cfg b/test/ExecutionEngine/RuntimeDyld/Mips/lit.local.cfg
new file mode 100644
index 0000000000000..a3183a25afaa9
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/Mips/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'Mips' not in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/MachO_i386_DynNoPIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/X86/MachO_i386_DynNoPIC_relocations.s
index f427b985b58c3..6b2fe9532c2f3 100644
--- a/test/ExecutionEngine/RuntimeDyld/X86/MachO_i386_DynNoPIC_relocations.s
+++ b/test/ExecutionEngine/RuntimeDyld/X86/MachO_i386_DynNoPIC_relocations.s
@@ -9,9 +9,9 @@ bar:
tmp0$pb:
popl %eax
# Test section difference relocation to non-lazy ptr section.
-# rtdyld-check: decode_operand(inst1, 4) = x$non_lazy_ptr - tmp0$pb
+# rtdyld-check: decode_operand(inst1, 4) = x$non_lazy_ptr - tmp0$pb + 8
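+# The +8 is folded into the instruction's displacement below, so the decoded
+# operand is the section difference plus 8.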
inst1:
- movl x$non_lazy_ptr-tmp0$pb(%eax), %eax
+ movl (x$non_lazy_ptr-tmp0$pb)+8(%eax), %eax
movl (%eax), %ebx
# Test VANILLA relocation to jump table.
diff --git a/test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata-darwin.ll b/test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata-darwin.ll
new file mode 100644
index 0000000000000..7617dbde0b7e3
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata-darwin.ll
@@ -0,0 +1,12 @@
+; This test checks that we are not instrumenting globals in llvm.metadata.
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+@.str_noinst = private unnamed_addr constant [4 x i8] c"aaa\00", section "llvm.metadata"
+@.str_inst = private unnamed_addr constant [4 x i8] c"aaa\00"
+
+; CHECK-NOT: {{asan_gen.*str_noinst}}
+; CHECK: {{asan_gen.*str_inst}}
+; CHECK: @asan.module_ctor
diff --git a/test/Instrumentation/AddressSanitizer/instrument-dynamic-allocas.ll b/test/Instrumentation/AddressSanitizer/instrument-dynamic-allocas.ll
index 18a86a9b88f70..ceaf0e6fcfb6d 100644
--- a/test/Instrumentation/AddressSanitizer/instrument-dynamic-allocas.ll
+++ b/test/Instrumentation/AddressSanitizer/instrument-dynamic-allocas.ll
@@ -2,17 +2,13 @@
; -asan-instrument-allocas=1
; RUN: opt < %s -asan -asan-module -asan-instrument-allocas=1 -S | FileCheck %s --check-prefix=CHECK-ALLOCA
-; RUN: opt < %s -asan -asan-module -asan-instrument-allocas=0 -S | FileCheck %s --check-prefix=CHECK-NOALLOCA
-; RUN: opt < %s -asan -asan-module -S | FileCheck %s --check-prefix=CHECK-NOALLOCA
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
define void @foo(i32 %len) sanitize_address {
entry:
-; CHECK-ALLOCA: store i32 -892679478
-; CHECK-ALLOCA: store i32 -875836469
-; CHECK-NOALLOCA-NOT: store i32 -892679478
-; CHECK-NOALLOCA-NOT: store i32 -875836469
+; CHECK-ALLOCA: __asan_alloca_poison
+; CHECK-ALLOCA: __asan_allocas_unpoison
%0 = alloca i32, align 4
%1 = alloca i8*
store volatile i32 %len, i32* %0, align 4
diff --git a/test/Instrumentation/AddressSanitizer/undecidable-dynamic-alloca-1.ll b/test/Instrumentation/AddressSanitizer/undecidable-dynamic-alloca-1.ll
deleted file mode 100644
index c67fb50146309..0000000000000
--- a/test/Instrumentation/AddressSanitizer/undecidable-dynamic-alloca-1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; Test that undecidable dynamic allocas are skipped by ASan.
-
-; RUN: opt < %s -asan -asan-module -asan-instrument-allocas=1 -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @g(i64 %n) sanitize_address {
-entry:
- %cmp = icmp sgt i64 %n, 100
- br i1 %cmp, label %do_alloca, label %done
-
-do_alloca:
-; CHECK-NOT: store i32 -892679478
- %0 = alloca i8, i64 %n, align 1
- call void @f(i8* %0)
- br label %done
-
-done:
- ret void
-}
-
-declare void @f(i8*)
-
diff --git a/test/Instrumentation/InstrProfiling/PR23499.ll b/test/Instrumentation/InstrProfiling/PR23499.ll
new file mode 100644
index 0000000000000..5aae735120bee
--- /dev/null
+++ b/test/Instrumentation/InstrProfiling/PR23499.ll
@@ -0,0 +1,21 @@
+;; Check that data associated with linkonce_odr functions is placed in
+;; the same comdat section as the associated function.
+
+; RUN: opt < %s -mtriple=x86_64-apple-macosx10.10.0 -instrprof -S | FileCheck %s
+; RUN: opt < %s -mtriple=x86_64-unknown-linux -instrprof -S | FileCheck %s
+
+$_Z3barIvEvv = comdat any
+
+@__llvm_profile_name__Z3barIvEvv = linkonce_odr hidden constant [11 x i8] c"_Z3barIvEvv", align 1
+
+; CHECK: @__llvm_profile_name__Z3barIvEvv = linkonce_odr hidden constant [11 x i8] c"_Z3barIvEvv", section "{{.*}}__llvm_prf_names", comdat($_Z3barIvEvv), align 1
+; CHECK: @__llvm_profile_counters__Z3barIvEvv = linkonce_odr hidden global [1 x i64] zeroinitializer, section "{{.*}}__llvm_prf_cnts", comdat($_Z3barIvEvv), align 8
+; CHECK: @__llvm_profile_data__Z3barIvEvv = linkonce_odr hidden constant { i32, i32, i64, i8*, i64* } { i32 11, i32 1, i64 0, i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__llvm_profile_name__Z3barIvEvv, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_profile_counters__Z3barIvEvv, i32 0, i32 0) }, section "{{.*}}__llvm_prf_data", comdat($_Z3barIvEvv), align 8
+
+declare void @llvm.instrprof.increment(i8*, i64, i32, i32) #1
+
+define linkonce_odr void @_Z3barIvEvv() comdat {
+entry:
+ call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__llvm_profile_name__Z3barIvEvv, i32 0, i32 0), i64 0, i32 1, i32 0)
+ ret void
+}
diff --git a/test/MC/AArch64/armv8.1a-atomic.s b/test/MC/AArch64/armv8.1a-atomic.s
new file mode 100644
index 0000000000000..bcfd3e7d166a5
--- /dev/null
+++ b/test/MC/AArch64/armv8.1a-atomic.s
@@ -0,0 +1,184 @@
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu -mattr=+v8.1a -show-encoding < %s 2> %t | FileCheck %s
+// RUN: FileCheck --check-prefix=CHECK-ERROR <%t %s
+ .text
+
+ // 8 bits
+ casb w0, w1, [x2]
+ casab w0, w1, [x2]
+ caslb w0, w1, [x2]
+ casalb w0, w1, [x2]
+
+//CHECK: casb w0, w1, [x2] // encoding: [0x41,0x7c,0xa0,0x08]
+//CHECK: casab w0, w1, [x2] // encoding: [0x41,0x7c,0xe0,0x08]
+//CHECK: caslb w0, w1, [x2] // encoding: [0x41,0xfc,0xa0,0x08]
+//CHECK: casalb w0, w1, [x2] // encoding: [0x41,0xfc,0xe0,0x08]
+
+ casb w0, w1, [w2]
+ casalb x0, x1, [x2]
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: casb w0, w1, [w2]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: casalb x0, x1, [x2]
+//CHECK-ERROR: ^
+
+ // 16 bits
+ cash w0, w1, [x2]
+ casah w0, w1, [x2]
+ caslh w0, w1, [x2]
+ casalh w0, w1, [x2]
+
+//CHECK: cash w0, w1, [x2] // encoding: [0x41,0x7c,0xa0,0x48]
+//CHECK: casah w0, w1, [x2] // encoding: [0x41,0x7c,0xe0,0x48]
+//CHECK: caslh w0, w1, [x2] // encoding: [0x41,0xfc,0xa0,0x48]
+//CHECK: casalh w0, w1, [x2] // encoding: [0x41,0xfc,0xe0,0x48]
+
+ // 32 bits
+ cas w0, w1, [x2]
+ casa w0, w1, [x2]
+ casl w0, w1, [x2]
+ casal w0, w1, [x2]
+
+//CHECK: cas w0, w1, [x2] // encoding: [0x41,0x7c,0xa0,0x88]
+//CHECK: casa w0, w1, [x2] // encoding: [0x41,0x7c,0xe0,0x88]
+//CHECK: casl w0, w1, [x2] // encoding: [0x41,0xfc,0xa0,0x88]
+//CHECK: casal w0, w1, [x2] // encoding: [0x41,0xfc,0xe0,0x88]
+
+ cas w0, w1, [w2]
+ casl w0, x1, [x2]
+
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: cas w0, w1, [w2]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: casl w0, x1, [x2]
+//CHECK-ERROR: ^
+
+ // 64 bits
+ cas x0, x1, [x2]
+ casa x0, x1, [x2]
+ casl x0, x1, [x2]
+ casal x0, x1, [x2]
+
+//CHECK: cas x0, x1, [x2] // encoding: [0x41,0x7c,0xa0,0xc8]
+//CHECK: casa x0, x1, [x2] // encoding: [0x41,0x7c,0xe0,0xc8]
+//CHECK: casl x0, x1, [x2] // encoding: [0x41,0xfc,0xa0,0xc8]
+//CHECK: casal x0, x1, [x2] // encoding: [0x41,0xfc,0xe0,0xc8]
+
+ casa x0, x1, [w2]
+ casal x0, w1, [x2]
+
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: casa x0, x1, [w2]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: casal x0, w1, [x2]
+//CHECK-ERROR: ^
+
+ // LD<OP> instructions
+ ldadda x0, x1, [x2]
+ ldclrl x0, x1, [x2]
+ ldeoral x0, x1, [x2]
+ ldset x0, x1, [x2]
+ ldsmaxa w0, w1, [x2]
+ ldsminlb w0, w1, [x2]
+ ldumaxalh w0, w1, [x2]
+ ldumin w0, w1, [x2]
+ ldsminb w2, w3, [x5]
+//CHECK: ldadda x0, x1, [x2] // encoding: [0x41,0x00,0xa0,0xf8]
+//CHECK: ldclrl x0, x1, [x2] // encoding: [0x41,0x10,0x60,0xf8]
+//CHECK: ldeoral x0, x1, [x2] // encoding: [0x41,0x20,0xe0,0xf8]
+//CHECK: ldset x0, x1, [x2] // encoding: [0x41,0x30,0x20,0xf8]
+//CHECK: ldsmaxa w0, w1, [x2] // encoding: [0x41,0x40,0xa0,0xb8]
+//CHECK: ldsminlb w0, w1, [x2] // encoding: [0x41,0x50,0x60,0x38]
+//CHECK: ldumaxalh w0, w1, [x2] // encoding: [0x41,0x60,0xe0,0x78]
+//CHECK: ldumin w0, w1, [x2] // encoding: [0x41,0x70,0x20,0xb8]
+//CHECK: ldsminb w2, w3, [x5] // encoding: [0xa3,0x50,0x22,0x38]
+
+ // ST<OP> instructions: aliases to LD<OP>
+ stADDlb w0, [x2]
+ stclrlh w0, [x2]
+ steorl w0, [x2]
+ stsetl x0, [x2]
+ stsmaxb w0, [x2]
+ stsminh w0, [x2]
+ stumax w0, [x2]
+ stumin x0, [x2]
+ stsminl x29, [sp]
+//CHECK: staddlb w0, [x2] // encoding: [0x5f,0x00,0x60,0x38]
+//CHECK: stclrlh w0, [x2] // encoding: [0x5f,0x10,0x60,0x78]
+//CHECK: steorl w0, [x2] // encoding: [0x5f,0x20,0x60,0xb8]
+//CHECK: stsetl x0, [x2] // encoding: [0x5f,0x30,0x60,0xf8]
+//CHECK: stsmaxb w0, [x2] // encoding: [0x5f,0x40,0x20,0x38]
+//CHECK: stsminh w0, [x2] // encoding: [0x5f,0x50,0x20,0x78]
+//CHECK: stumax w0, [x2] // encoding: [0x5f,0x60,0x20,0xb8]
+//CHECK: stumin x0, [x2] // encoding: [0x5f,0x70,0x20,0xf8]
+//CHECK: stsminl x29, [sp] // encoding: [0xff,0x53,0x7d,0xf8]
+
+
+ ldsmax x0, x1, [w2]
+ ldeorl w0, w1, [w2]
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: ldsmax x0, x1, [w2]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: ldeorl w0, w1, [w2]
+//CHECK-ERROR: ^
+
+ // SWP instruction
+ swp x0, x1, [x2]
+ swpb w0, w1, [x2]
+ swplh w0, w1, [x2]
+ swpal x0, x1, [sp]
+//CHECK: swp x0, x1, [x2] // encoding: [0x41,0x80,0x20,0xf8]
+//CHECK: swpb w0, w1, [x2] // encoding: [0x41,0x80,0x20,0x38]
+//CHECK: swplh w0, w1, [x2] // encoding: [0x41,0x80,0x60,0x78]
+//CHECK: swpal x0, x1, [sp] // encoding: [0xe1,0x83,0xe0,0xf8]
+
+ swp x0, x1, [w2]
+ swp x0, x1, [xzr]
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: swp x0, x1, [w2]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: swp x0, x1, [xzr]
+//CHECK-ERROR: ^
+
+ // CASP instruction
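+ // Each CASP data pair must start at an even-numbered register, and its
+ // partner must be the next consecutive register of the same size.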
+ casp x0, x1, x2, x3, [x4]
+ casp w0, w1, w2, w3, [x4]
+//CHECK: casp x0, x1, x2, x3, [x4] // encoding: [0x82,0x7c,0x20,0x48]
+//CHECK: casp w0, w1, w2, w3, [x4] // encoding: [0x82,0x7c,0x20,0x08]
+
+ casp x1, x2, x4, x5, [x6]
+ casp x0, x1, x3, x4, [x5]
+ casp x0, x2, x4, x5, [x6]
+ casp x0, x1, x2, x4, [x5]
+ casp x0, w1, x2, x3, [x5]
+ casp w0, x1, x2, x3, [x5]
+ casp w0, x1, w2, w3, [x5]
+ casp x0, x1, w2, w3, [x5]
+//CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp x1, x2, x4, x5, [x6]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp x0, x1, x3, x4, [x5]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: expected second odd register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp x0, x2, x4, x5, [x6]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: expected second odd register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp x0, x1, x2, x4, [x5]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: expected second odd register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp x0, w1, x2, x3, [x5]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: expected second odd register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp w0, x1, x2, x3, [x5]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: expected second odd register of a consecutive same-size even/odd register pair
+//CHECK-ERROR: casp w0, x1, w2, w3, [x5]
+//CHECK-ERROR: ^
+//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: casp x0, x1, w2, w3, [x5]
+//CHECK-ERROR: ^
diff --git a/test/MC/AArch64/basic-a64-diagnostics.s b/test/MC/AArch64/basic-a64-diagnostics.s
index 1d7ba710a9a18..bf7db132b44ab 100644
--- a/test/MC/AArch64/basic-a64-diagnostics.s
+++ b/test/MC/AArch64/basic-a64-diagnostics.s
@@ -3494,6 +3494,7 @@
msr ID_MMFR1_EL1, x12
msr ID_MMFR2_EL1, x12
msr ID_MMFR3_EL1, x12
+ msr ID_MMFR4_EL1, x12
msr ID_ISAR0_EL1, x12
msr ID_ISAR1_EL1, x12
msr ID_ISAR2_EL1, x12
@@ -3587,6 +3588,9 @@
// CHECK-ERROR-NEXT: msr ID_MMFR3_EL1, x12
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: expected writable system register or pstate
+// CHECK-ERROR-NEXT: msr ID_MMFR4_EL1, x12
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected writable system register or pstate
// CHECK-ERROR-NEXT: msr ID_ISAR0_EL1, x12
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: expected writable system register or pstate
diff --git a/test/MC/AArch64/basic-a64-instructions.s b/test/MC/AArch64/basic-a64-instructions.s
index 75c86efd2071c..5d33a4f933b3d 100644
--- a/test/MC/AArch64/basic-a64-instructions.s
+++ b/test/MC/AArch64/basic-a64-instructions.s
@@ -4306,6 +4306,7 @@ _func:
mrs x9, ID_MMFR1_EL1
mrs x9, ID_MMFR2_EL1
mrs x9, ID_MMFR3_EL1
+ mrs x9, ID_MMFR4_EL1
mrs x9, ID_ISAR0_EL1
mrs x9, ID_ISAR1_EL1
mrs x9, ID_ISAR2_EL1
@@ -4606,6 +4607,7 @@ _func:
// CHECK: mrs x9, {{id_mmfr1_el1|ID_MMFR1_EL1}} // encoding: [0xa9,0x01,0x38,0xd5]
// CHECK: mrs x9, {{id_mmfr2_el1|ID_MMFR2_EL1}} // encoding: [0xc9,0x01,0x38,0xd5]
// CHECK: mrs x9, {{id_mmfr3_el1|ID_MMFR3_EL1}} // encoding: [0xe9,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_mmfr4_el1|ID_MMFR4_EL1}} // encoding: [0xc9,0x02,0x38,0xd5]
// CHECK: mrs x9, {{id_isar0_el1|ID_ISAR0_EL1}} // encoding: [0x09,0x02,0x38,0xd5]
// CHECK: mrs x9, {{id_isar1_el1|ID_ISAR1_EL1}} // encoding: [0x29,0x02,0x38,0xd5]
// CHECK: mrs x9, {{id_isar2_el1|ID_ISAR2_EL1}} // encoding: [0x49,0x02,0x38,0xd5]
diff --git a/test/MC/AArch64/case-insen-reg-names.s b/test/MC/AArch64/case-insen-reg-names.s
new file mode 100644
index 0000000000000..b31ab675a7e1f
--- /dev/null
+++ b/test/MC/AArch64/case-insen-reg-names.s
@@ -0,0 +1,8 @@
+// RUN: llvm-mc -triple aarch64-none-linux-gnu -show-encoding < %s | FileCheck %s
+
+fadd v0.2d, v5.2d, v6.2d
+fadd V0.2d, V5.2d, V6.2d
+fadd v0.2d, V5.2d, v6.2d
+// CHECK: fadd v0.2d, v5.2d, v6.2d // encoding: [0xa0,0xd4,0x66,0x4e]
+// CHECK: fadd v0.2d, v5.2d, v6.2d // encoding: [0xa0,0xd4,0x66,0x4e]
+// CHECK: fadd v0.2d, v5.2d, v6.2d // encoding: [0xa0,0xd4,0x66,0x4e]
diff --git a/test/MC/ARM/arm-elf-symver.s b/test/MC/ARM/arm-elf-symver.s
index 4303540e82aab..ce9c4fe5f00e1 100644
--- a/test/MC/ARM/arm-elf-symver.s
+++ b/test/MC/ARM/arm-elf-symver.s
@@ -78,7 +78,7 @@ global1:
@ CHECK-NEXT: Section: .text
@ CHECK-NEXT: }
@ CHECK-NEXT: Symbol {
-@ CHECK-NEXT: Name: .text (0)
+@ CHECK-NEXT: Name: (0)
@ CHECK-NEXT: Value: 0x0
@ CHECK-NEXT: Size: 0
@ CHECK-NEXT: Binding: Local (0x0)
@@ -87,22 +87,22 @@ global1:
@ CHECK-NEXT: Section: .text
@ CHECK-NEXT: }
@ CHECK-NEXT: Symbol {
-@ CHECK-NEXT: Name: .data (0)
+@ CHECK-NEXT: Name: bar2@zed
@ CHECK-NEXT: Value: 0x0
@ CHECK-NEXT: Size: 0
-@ CHECK-NEXT: Binding: Local (0x0)
-@ CHECK-NEXT: Type: Section (0x3)
+@ CHECK-NEXT: Binding: Global (0x1)
+@ CHECK-NEXT: Type: None (0x0)
@ CHECK-NEXT: Other: 0
-@ CHECK-NEXT: Section: .data
+@ CHECK-NEXT: Section: Undefined (0x0)
@ CHECK-NEXT: }
@ CHECK-NEXT: Symbol {
-@ CHECK-NEXT: Name: .bss (0)
+@ CHECK-NEXT: Name: bar6@zed
@ CHECK-NEXT: Value: 0x0
@ CHECK-NEXT: Size: 0
-@ CHECK-NEXT: Binding: Local (0x0)
-@ CHECK-NEXT: Type: Section (0x3)
+@ CHECK-NEXT: Binding: Global (0x1)
+@ CHECK-NEXT: Type: None (0x0)
@ CHECK-NEXT: Other: 0
-@ CHECK-NEXT: Section: .bss
+@ CHECK-NEXT: Section: Undefined (0x0)
@ CHECK-NEXT: }
@ CHECK-NEXT: Symbol {
@ CHECK-NEXT: Name: g1@@zed
@@ -122,22 +122,4 @@ global1:
@ CHECK-NEXT: Other: 0
@ CHECK-NEXT: Section: .text
@ CHECK-NEXT: }
-@ CHECK-NEXT: Symbol {
-@ CHECK-NEXT: Name: bar2@zed
-@ CHECK-NEXT: Value: 0x0
-@ CHECK-NEXT: Size: 0
-@ CHECK-NEXT: Binding: Global (0x1)
-@ CHECK-NEXT: Type: None (0x0)
-@ CHECK-NEXT: Other: 0
-@ CHECK-NEXT: Section: Undefined (0x0)
-@ CHECK-NEXT: }
-@ CHECK-NEXT: Symbol {
-@ CHECK-NEXT: Name: bar6@zed
-@ CHECK-NEXT: Value: 0x0
-@ CHECK-NEXT: Size: 0
-@ CHECK-NEXT: Binding: Global (0x1)
-@ CHECK-NEXT: Type: None (0x0)
-@ CHECK-NEXT: Other: 0
-@ CHECK-NEXT: Section: Undefined (0x0)
-@ CHECK-NEXT: }
@ CHECK-NEXT: ]
diff --git a/test/MC/ARM/directive-arch-armv2.s b/test/MC/ARM/directive-arch-armv2.s
index 40857ca9fad8c..f6dc20c0a3630 100644
--- a/test/MC/ARM/directive-arch-armv2.s
+++ b/test/MC/ARM/directive-arch-armv2.s
@@ -20,7 +20,7 @@
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: CPU_arch
-@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: Description: Pre-v4
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: ARM_ISA_use
diff --git a/test/MC/ARM/directive-arch-armv2a.s b/test/MC/ARM/directive-arch-armv2a.s
index 62c2ace796f9b..bb0a693dc84a7 100644
--- a/test/MC/ARM/directive-arch-armv2a.s
+++ b/test/MC/ARM/directive-arch-armv2a.s
@@ -20,7 +20,7 @@
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: CPU_arch
-@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: Description: Pre-v4
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: ARM_ISA_use
diff --git a/test/MC/ARM/directive-arch-armv3.s b/test/MC/ARM/directive-arch-armv3.s
index 41cce659246b5..aeec638eac895 100644
--- a/test/MC/ARM/directive-arch-armv3.s
+++ b/test/MC/ARM/directive-arch-armv3.s
@@ -20,7 +20,7 @@
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: CPU_arch
-@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: Description: Pre-v4
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: ARM_ISA_use
diff --git a/test/MC/ARM/directive-arch-armv3m.s b/test/MC/ARM/directive-arch-armv3m.s
index 8041da2e1e52f..fda8db52e7bf7 100644
--- a/test/MC/ARM/directive-arch-armv3m.s
+++ b/test/MC/ARM/directive-arch-armv3m.s
@@ -20,7 +20,7 @@
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: CPU_arch
-@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: Description: Pre-v4
@ CHECK-ATTR: }
@ CHECK-ATTR: Attribute {
@ CHECK-ATTR: TagName: ARM_ISA_use
diff --git a/test/MC/ARM/directive-fpu-multiple.s b/test/MC/ARM/directive-fpu-multiple.s
index de2baaf27165b..66fc274159638 100644
--- a/test/MC/ARM/directive-fpu-multiple.s
+++ b/test/MC/ARM/directive-fpu-multiple.s
@@ -1,12 +1,28 @@
@ Check multiple .fpu directives.
@ The later .fpu directive should overwrite the earlier one.
-@ See also: directive-fpu-multiple2.s.
+@ We also check here that all of the .fpu directives we expect to work do work.
@ RUN: llvm-mc -triple arm-eabi -filetype obj %s | llvm-readobj -arm-attributes \
@ RUN: | FileCheck %s -check-prefix CHECK-ATTR
+ .fpu none
+ .fpu vfp
+ .fpu vfpv2
+ .fpu vfpv3
+ .fpu vfpv3-d16
+ .fpu vfpv4
+ .fpu vfpv4-d16
+ .fpu fpv4-sp-d16
+ .fpu fpv5-d16
+ .fpu fpv5-sp-d16
+ .fpu fp-armv8
.fpu neon
+ .fpu neon-vfpv4
+ .fpu neon-fp-armv8
+ .fpu crypto-neon-fp-armv8
+ .fpu softvfp
+
.fpu vfpv4
@ CHECK-ATTR: FileAttributes {
diff --git a/test/MC/ARM/elf-movt.s b/test/MC/ARM/elf-movt.s
index 0fd77754795be..9b46cafe3fb08 100644
--- a/test/MC/ARM/elf-movt.s
+++ b/test/MC/ARM/elf-movt.s
@@ -49,10 +49,6 @@ barf: @ @barf
@ OBJ-NEXT: AddressAlignment: 4
@ OBJ-NEXT: EntrySize: 8
@ OBJ-NEXT: Relocations [
-@ OBJ-NEXT: 0x0 R_ARM_MOVW_PREL_NC
-@ OBJ-NEXT: 0x4 R_ARM_MOVT_PREL
+@ OBJ-NEXT: 0x0 R_ARM_MOVW_PREL_NC GOT 0x0
+@ OBJ-NEXT: 0x4 R_ARM_MOVT_PREL GOT 0x0
@ OBJ-NEXT: ]
-@ OBJ-NEXT: SectionData (
-@ OBJ-NEXT: 0000: 00000000 2D060000 04000000 2E060000 |....-...........|
-@ OBJ-NEXT: )
-@ OBJ-NEXT: }
diff --git a/test/MC/AsmParser/defsym.s b/test/MC/AsmParser/defsym.s
new file mode 100644
index 0000000000000..06981f5404661
--- /dev/null
+++ b/test/MC/AsmParser/defsym.s
@@ -0,0 +1,20 @@
+# RUN: llvm-mc -filetype=obj -triple=i386-unknown-elf -defsym a=7 -defsym b=11 %s | llvm-objdump -t - | FileCheck %s
+
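+# <> is the assembler's inequality operator, so each .err below fires only
+# when a -defsym value does not match its expected value.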
+.ifndef a
+.err
+.endif
+
+.if a<>7
+.err
+.endif
+
+.ifndef b
+.err
+.endif
+
+.if b<>11
+.err
+.endif
+
+# CHECK: 00000007 *ABS* 00000000 a
+# CHECK: 0000000b *ABS* 00000000 b \ No newline at end of file
diff --git a/test/MC/AsmParser/defsym_error1.s b/test/MC/AsmParser/defsym_error1.s
new file mode 100644
index 0000000000000..872577844d469
--- /dev/null
+++ b/test/MC/AsmParser/defsym_error1.s
@@ -0,0 +1,2 @@
+# RUN: not llvm-mc -filetype=obj -triple=i386-unknown-elf -defsym aaoeuaoeu %s 2>&1 | FileCheck %s
+# CHECK: defsym must be of the form: sym=value
diff --git a/test/MC/AsmParser/defsym_error2.s b/test/MC/AsmParser/defsym_error2.s
new file mode 100644
index 0000000000000..ec4cc79182fa3
--- /dev/null
+++ b/test/MC/AsmParser/defsym_error2.s
@@ -0,0 +1,2 @@
+# RUN: not llvm-mc -filetype=obj -triple=i386-unknown-elf -defsym a=a %s 2>&1 | FileCheck %s
+# CHECK: error: Value is not an integer: a
diff --git a/test/MC/COFF/cross-section-relative.ll b/test/MC/COFF/cross-section-relative.ll
index 18823f8b29a13..6b7a3d7b40b2f 100644
--- a/test/MC/COFF/cross-section-relative.ll
+++ b/test/MC/COFF/cross-section-relative.ll
@@ -1,7 +1,5 @@
; Verify the assembler produces the expected expressions
; RUN: llc -mtriple=x86_64-pc-win32 %s -o - | FileCheck %s
-; Verify the .fix data section conveys the right offsets and the right relocations
-; RUN: llc -mtriple=x86_64-pc-win32 -filetype=obj %s -o - | llvm-readobj -relocations -expand-relocs -sections -section-data | FileCheck %s --check-prefix=READOBJ
;;;; some globals
@@ -37,61 +35,3 @@
i64 256,
i32 trunc(i64 sub(i64 ptrtoint(i32* @g3 to i64), i64 ptrtoint(i32* getelementptr inbounds (%struct.EEType, %struct.EEType* @t6, i32 0, i32 2) to i64)) to i32 )
}, section ".fix"
-
-; READOBJ: Section {
-; READOBJ: Number: 5
-; READOBJ: Name: .fix (2E 66 69 78 00 00 00 00)
-; READOBJ: VirtualSize: 0x0
-; READOBJ: VirtualAddress: 0x0
-; READOBJ: RawDataSize: 56
-; READOBJ: PointerToRawData: 0xEC
-; READOBJ: PointerToRelocations: 0x124
-; READOBJ: PointerToLineNumbers: 0x0
-; READOBJ: RelocationCount: 6
-; READOBJ: LineNumberCount: 0
-; READOBJ: Characteristics [ (0xC0500040)
-; READOBJ: IMAGE_SCN_ALIGN_16BYTES (0x500000)
-; READOBJ: IMAGE_SCN_CNT_INITIALIZED_DATA (0x40)
-; READOBJ: IMAGE_SCN_MEM_READ (0x40000000)
-; READOBJ: IMAGE_SCN_MEM_WRITE (0x80000000)
-; READOBJ: ]
-; READOBJ: SectionData (
-; READOBJ: 0000: 10000000 00000000 0C000000 00000000 |................|
-; READOBJ: 0010: 08000000 00000000 0C000000 00000000 |................|
-; READOBJ: 0020: 01020000 00000000 00010000 00000000 |................|
-; READOBJ: 0030: 0C000000 00000000 |........|
-; READOBJ: )
-; READOBJ: }
-; READOBJ: ]
-; READOBJ: Relocations [
-; READOBJ: Section (5) .fix {
-; READOBJ: Relocation {
-; READOBJ: Offset: 0x0
-; READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
-; READOBJ: Symbol: .rdata
-; READOBJ: }
-; READOBJ: Relocation {
-; READOBJ: Offset: 0x8
-; READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
-; READOBJ: Symbol: .rdata
-; READOBJ: }
-; READOBJ: Relocation {
-; READOBJ: Offset: 0x10
-; READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
-; READOBJ: Symbol: .rdata
-; READOBJ: }
-; READOBJ: Relocation {
-; READOBJ: Offset: 0x18
-; READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
-; READOBJ: Symbol: .rdata
-; READOBJ: }
-; READOBJ: Relocation {
-; READOBJ: Offset: 0x1C
-; READOBJ: Type: IMAGE_REL_AMD64_ADDR32NB (3)
-; READOBJ: Symbol: g3
-; READOBJ: }
-; READOBJ: Relocation {
-; READOBJ: Offset: 0x30
-; READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
-; READOBJ: Symbol: .rdata
-; READOBJ: }
diff --git a/test/MC/COFF/cross-section-relative.s b/test/MC/COFF/cross-section-relative.s
new file mode 100644
index 0000000000000..dd94b0a5e83ab
--- /dev/null
+++ b/test/MC/COFF/cross-section-relative.s
@@ -0,0 +1,118 @@
+// Verify the .fix data section conveys the right offsets and the right relocations
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-win32 %s -o - | llvm-readobj -relocations -expand-relocs -sections -section-data | FileCheck %s --check-prefix=READOBJ
+
+ .text
+ .section .rdata,"dr"
+ .globl g1 # @g1
+ .align 4
+g1:
+ .long 1 # 0x1
+
+ .globl g2 # @g2
+ .align 4
+g2:
+ .long 2 # 0x2
+
+ .globl g3 # @g3
+ .align 4
+g3:
+ .long 3 # 0x3
+
+ .globl g4 # @g4
+ .align 4
+g4:
+ .long 4 # 0x4
+
+ .section .fix,"dw"
+ .globl t1 # @t1
+ .align 8
+t1:
+ .quad (g3-t1)+4
+
+ .globl t2 # @t2
+ .align 8
+t2:
+ .quad g3-t2
+
+ .globl t3 # @t3
+ .align 8
+t3:
+ .quad (g3-t3)-4
+
+ .globl t4 # @t4
+ .align 4
+t4:
+ .long g3-t4
+
+ .globl t5 # @t5
+ .align 4
+t5:
+ .long g3@IMGREL
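+// @IMGREL requests an image-relative fixup, which appears below as
+// IMAGE_REL_AMD64_ADDR32NB.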
+
+ .globl t6 # @t6
+ .align 16
+t6:
+ .ascii "\001\002"
+ .zero 6
+ .quad 256 # 0x100
+ .long g3-(t6+16)
+ .zero 4
+
+
+// READOBJ: Section {
+// READOBJ: Number: 5
+// READOBJ: Name: .fix (2E 66 69 78 00 00 00 00)
+// READOBJ: VirtualSize: 0x0
+// READOBJ: VirtualAddress: 0x0
+// READOBJ: RawDataSize: 56
+// READOBJ: PointerToRawData: 0xEC
+// READOBJ: PointerToRelocations: 0x124
+// READOBJ: PointerToLineNumbers: 0x0
+// READOBJ: RelocationCount: 6
+// READOBJ: LineNumberCount: 0
+// READOBJ: Characteristics [ (0xC0500040)
+// READOBJ: IMAGE_SCN_ALIGN_16BYTES (0x500000)
+// READOBJ: IMAGE_SCN_CNT_INITIALIZED_DATA (0x40)
+// READOBJ: IMAGE_SCN_MEM_READ (0x40000000)
+// READOBJ: IMAGE_SCN_MEM_WRITE (0x80000000)
+// READOBJ: ]
+// READOBJ: SectionData (
+// READOBJ: 0000: 10000000 00000000 0C000000 00000000 |................|
+// READOBJ: 0010: 08000000 00000000 0C000000 00000000 |................|
+// READOBJ: 0020: 01020000 00000000 00010000 00000000 |................|
+// READOBJ: 0030: 0C000000 00000000 |........|
+// READOBJ: )
+// READOBJ: }
+// READOBJ: ]
+// READOBJ: Relocations [
+// READOBJ: Section (5) .fix {
+// READOBJ: Relocation {
+// READOBJ: Offset: 0x0
+// READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
+// READOBJ: Symbol: .rdata
+// READOBJ: }
+// READOBJ: Relocation {
+// READOBJ: Offset: 0x8
+// READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
+// READOBJ: Symbol: .rdata
+// READOBJ: }
+// READOBJ: Relocation {
+// READOBJ: Offset: 0x10
+// READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
+// READOBJ: Symbol: .rdata
+// READOBJ: }
+// READOBJ: Relocation {
+// READOBJ: Offset: 0x18
+// READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
+// READOBJ: Symbol: .rdata
+// READOBJ: }
+// READOBJ: Relocation {
+// READOBJ: Offset: 0x1C
+// READOBJ: Type: IMAGE_REL_AMD64_ADDR32NB (3)
+// READOBJ: Symbol: g3
+// READOBJ: }
+// READOBJ: Relocation {
+// READOBJ: Offset: 0x30
+// READOBJ: Type: IMAGE_REL_AMD64_REL32 (4)
+// READOBJ: Symbol: .rdata
+// READOBJ: }
diff --git a/test/MC/Disassembler/AArch64/armv8.1a-atomic.txt b/test/MC/Disassembler/AArch64/armv8.1a-atomic.txt
new file mode 100644
index 0000000000000..b20fabb8c6f1e
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/armv8.1a-atomic.txt
@@ -0,0 +1,87 @@
+# RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+v8.1a --disassemble < %s | FileCheck %s
+
+0x41,0x7c,0xa0,0x08
+0x41,0x7c,0xe0,0x08
+0x41,0xfc,0xa0,0x08
+0x41,0xfc,0xe0,0x08
+0x41,0x7c,0xa0,0x48
+0x41,0x7c,0xe0,0x48
+0x41,0xfc,0xa0,0x48
+0x41,0xfc,0xe0,0x48
+# CHECK: casb w0, w1, [x2]
+# CHECK: casab w0, w1, [x2]
+# CHECK: caslb w0, w1, [x2]
+# CHECK: casalb w0, w1, [x2]
+# CHECK: cash w0, w1, [x2]
+# CHECK: casah w0, w1, [x2]
+# CHECK: caslh w0, w1, [x2]
+# CHECK: casalh w0, w1, [x2]
+
+0x41,0x7c,0xa0,0x88
+0x41,0x7c,0xe0,0x88
+0x41,0xfc,0xa0,0x88
+0x41,0xfc,0xe0,0x88
+0x41,0x7c,0xa0,0xc8
+0x41,0x7c,0xe0,0xc8
+0x41,0xfc,0xa0,0xc8
+0x41,0xfc,0xe0,0xc8
+# CHECK: cas w0, w1, [x2]
+# CHECK: casa w0, w1, [x2]
+# CHECK: casl w0, w1, [x2]
+# CHECK: casal w0, w1, [x2]
+# CHECK: cas x0, x1, [x2]
+# CHECK: casa x0, x1, [x2]
+# CHECK: casl x0, x1, [x2]
+# CHECK: casal x0, x1, [x2]
+
+0x41,0x80,0x20,0xf8
+0x41,0x80,0x20,0x38
+0x41,0x80,0x60,0x78
+0xe1,0x83,0xe0,0xf8
+# CHECK: swp x0, x1, [x2]
+# CHECK: swpb w0, w1, [x2]
+# CHECK: swplh w0, w1, [x2]
+# CHECK: swpal x0, x1, [sp]
+
+0x41,0x00,0xa0,0xf8
+0x41,0x10,0x60,0xf8
+0x41,0x20,0xe0,0xf8
+0x41,0x30,0x20,0xf8
+0x41,0x40,0xa0,0xb8
+0x41,0x50,0x60,0x38
+0x41,0x60,0xe0,0x78
+0x41,0x70,0x20,0xb8
+0xab,0x51,0xe7,0x78
+# CHECK: ldadda x0, x1, [x2]
+# CHECK: ldclrl x0, x1, [x2]
+# CHECK: ldeoral x0, x1, [x2]
+# CHECK: ldset x0, x1, [x2]
+# CHECK: ldsmaxa w0, w1, [x2]
+# CHECK: ldsminlb w0, w1, [x2]
+# CHECK: ldumaxalh w0, w1, [x2]
+# CHECK: ldumin w0, w1, [x2]
+# CHECK: ldsminalh w7, w11, [x13]
+
+0x5f,0x00,0x60,0x38
+0x5f,0x10,0x60,0x78
+0x5f,0x20,0x60,0xb8
+0x5f,0x30,0x60,0xf8
+0x5f,0x40,0x20,0x38
+0x5f,0x50,0x20,0x78
+0x5f,0x60,0x20,0xb8
+0x5f,0x70,0x20,0xf8
+0xff,0x53,0x7d,0xf8
+# CHECK: staddlb w0, [x2]
+# CHECK: stclrlh w0, [x2]
+# CHECK: steorl w0, [x2]
+# CHECK: stsetl x0, [x2]
+# CHECK: stsmaxb w0, [x2]
+# CHECK: stsminh w0, [x2]
+# CHECK: stumax w0, [x2]
+# CHECK: stumin x0, [x2]
+# CHECK: stsminl x29, [sp]
+
+0x82,0x7c,0x20,0x48
+0x82,0x7c,0x20,0x08
+# CHECK: casp x0, x1, x2, x3, [x4]
+# CHECK: casp w0, w1, w2, w3, [x4]
diff --git a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
index c777f7aa6494c..615d9ba19ca8e 100644
--- a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
+++ b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
@@ -3414,6 +3414,7 @@
# CHECK: mrs x9, {{id_mmfr1_el1|ID_MMFR1_EL1}}
# CHECK: mrs x9, {{id_mmfr2_el1|ID_MMFR2_EL1}}
# CHECK: mrs x9, {{id_mmfr3_el1|ID_MMFR3_EL1}}
+# CHECK: mrs x9, {{id_mmfr4_el1|ID_MMFR4_EL1}}
# CHECK: mrs x9, {{id_isar0_el1|ID_ISAR0_EL1}}
# CHECK: mrs x9, {{id_isar1_el1|ID_ISAR1_EL1}}
# CHECK: mrs x9, {{id_isar2_el1|ID_ISAR2_EL1}}
@@ -3968,6 +3969,7 @@
0xa9 0x1 0x38 0xd5
0xc9 0x1 0x38 0xd5
0xe9 0x1 0x38 0xd5
+0xc9 0x2 0x38 0xd5
0x9 0x2 0x38 0xd5
0x29 0x2 0x38 0xd5
0x49 0x2 0x38 0xd5
diff --git a/test/MC/Disassembler/PowerPC/vsx.txt b/test/MC/Disassembler/PowerPC/vsx.txt
index 04b2eeb0dd2aa..6f4ba6f6b9ac2 100644
--- a/test/MC/Disassembler/PowerPC/vsx.txt
+++ b/test/MC/Disassembler/PowerPC/vsx.txt
@@ -90,6 +90,12 @@
# CHECK: xsmaddmdp 7, 63, 27
0xf0 0xff 0xd9 0x4c
+# CHECK: xsmaddasp 7, 63, 27
+0xf0 0xff 0xd8 0x0c
+
+# CHECK: xsmaddmsp 7, 63, 27
+0xf0 0xff 0xd8 0x4c
+
# CHECK: xsmaxdp 7, 63, 27
0xf0 0xff 0xdd 0x04
@@ -102,6 +108,12 @@
# CHECK: xsmsubmdp 7, 63, 27
0xf0 0xff 0xd9 0xcc
+# CHECK: xsmsubasp 7, 63, 27
+0xf0 0xff 0xd8 0x8c
+
+# CHECK: xsmsubmsp 7, 63, 27
+0xf0 0xff 0xd8 0xcc
+
# CHECK: xsmulsp 7, 63, 27
0xf0 0xff 0xd8 0x84
@@ -126,6 +138,18 @@
# CHECK: xsnmsubmdp 7, 63, 27
0xf0 0xff 0xdd 0xcc
+# CHECK: xsnmaddasp 7, 63, 27
+0xf0 0xff 0xdc 0x0c
+
+# CHECK: xsnmaddmsp 7, 63, 27
+0xf0 0xff 0xdc 0x4c
+
+# CHECK: xsnmsubasp 7, 63, 27
+0xf0 0xff 0xdc 0x8c
+
+# CHECK: xsnmsubmsp 7, 63, 27
+0xf0 0xff 0xdc 0xcc
+
# CHECK: xsrdpi 7, 27
0xf0 0xe0 0xd9 0x24
diff --git a/test/MC/ELF/alias.s b/test/MC/ELF/alias.s
index 0ab6dd4b5b8e9..df39fd4be147d 100644
--- a/test/MC/ELF/alias.s
+++ b/test/MC/ELF/alias.s
@@ -107,33 +107,25 @@ leaq .Llocal1(%rip), %rdi
// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .text (0)
+// CHECK-NEXT: Name: (0)
// CHECK-NOT: Symbol {
// CHECK: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .data (0)
-// CHECK-NOT: Symbol {
-// CHECK: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .bss (0)
-// CHECK-NOT: Symbol {
-// CHECK: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar3
+// CHECK-NEXT: Name: bar2
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar2
+// CHECK-NEXT: Name: bar3
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined (0x0)
+// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/ELF/basic-elf-32.s b/test/MC/ELF/basic-elf-32.s
index 16266af7f4452..1036b04a747bc 100644
--- a/test/MC/ELF/basic-elf-32.s
+++ b/test/MC/ELF/basic-elf-32.s
@@ -54,20 +54,6 @@ main: # @main
// CHECK: ]
// CHECK: Symbols [
-// CHECK: Symbol {
-// CHECK: Binding: Local
-// CHECK: Type: Section
-// CHECK: }
-
-// CHECK: Symbol {
-// CHECK: Binding: Local
-// CHECK: Type: Section
-// CHECK: }
-
-// CHECK: Symbol {
-// CHECK: Binding: Local
-// CHECK: Type: Section
-// CHECK: }
// CHECK: Symbol {
// CHECK: Name: main
diff --git a/test/MC/ELF/basic-elf-64.s b/test/MC/ELF/basic-elf-64.s
index d99125e06a91b..b93f9aebd3a09 100644
--- a/test/MC/ELF/basic-elf-64.s
+++ b/test/MC/ELF/basic-elf-64.s
@@ -58,16 +58,6 @@ main: # @main
// CHECK: Type: Section
// CHECK: Symbol {
-// CHECK: Binding: Local
-// CHECK: Type: Section
-// CHECK: }
-
-// CHECK: Symbol {
-// CHECK: Binding: Local
-// CHECK: Type: Section
-// CHECK: }
-
-// CHECK: Symbol {
// CHECK: Name: main
// CHECK: Binding: Global
// CHECK: Type: Function
diff --git a/test/MC/ELF/comdat-dup-group-name.s b/test/MC/ELF/comdat-dup-group-name.s
index a2dc4cc33d987..e52f3dcc04d1c 100644
--- a/test/MC/ELF/comdat-dup-group-name.s
+++ b/test/MC/ELF/comdat-dup-group-name.s
@@ -24,15 +24,6 @@
// CHECK-NOT: }
// CHECK: Section: .group (0x7)
-// CHECK: Name: .foo
-// CHECK-NOT: }
-// CHECK: Section: .foo (0x6)
-
-// CHECK: Name: .foo
-// CHECK-NOT: }
-// CHECK: Section: .foo (0x8)
-
-
.section .foo,"axG",@progbits,f1,comdat
nop
diff --git a/test/MC/ELF/comdat.s b/test/MC/ELF/comdat.s
index e71dea089c4d4..18da17e6118cd 100644
--- a/test/MC/ELF/comdat.s
+++ b/test/MC/ELF/comdat.s
@@ -46,7 +46,7 @@
// CHECK-NEXT: Offset:
// CHECK-NEXT: Size: 12
// CHECK-NEXT: Link:
-// CHECK-NEXT: Info: 10
+// CHECK-NEXT: Info: 3
// CHECK-NEXT: AddressAlignment: 4
// CHECK-NEXT: EntrySize: 4
// CHECK-NEXT: SectionData (
diff --git a/test/MC/ELF/common-error3.s b/test/MC/ELF/common-error3.s
new file mode 100644
index 0000000000000..a84779e653e0a
--- /dev/null
+++ b/test/MC/ELF/common-error3.s
@@ -0,0 +1,5 @@
+# RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux %s 2>&1 | FileCheck %s
+
+# CHECK: Symbol: C redeclared as different type
+ .comm C,4,4
+ .comm C,8,4 \ No newline at end of file
diff --git a/test/MC/ELF/common-redeclare.s b/test/MC/ELF/common-redeclare.s
new file mode 100644
index 0000000000000..f8ee17d84e2ed
--- /dev/null
+++ b/test/MC/ELF/common-redeclare.s
@@ -0,0 +1,5 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux %s | llvm-objdump -t - | FileCheck %s
+
+# CHECK: 0000000000000004 g *COM* 00000004 C
+ .comm C,4,4
+ .comm C,4,4 \ No newline at end of file
diff --git a/test/MC/ELF/empty.s b/test/MC/ELF/empty.s
index 6ddbd8c942a25..7b686fef3a234 100644
--- a/test/MC/ELF/empty.s
+++ b/test/MC/ELF/empty.s
@@ -78,9 +78,9 @@
// CHECK-NEXT: ]
// CHECK-NEXT: Address: 0x0
// CHECK-NEXT: Offset:
-// CHECK-NEXT: Size: 96
+// CHECK-NEXT: Size: 24
// CHECK-NEXT: Link:
-// CHECK-NEXT: Info: 4
+// CHECK-NEXT: Info: 1
// CHECK-NEXT: AddressAlignment: 8
// CHECK-NEXT: EntrySize: 24
// CHECK-NEXT: }
diff --git a/test/MC/ELF/got.s b/test/MC/ELF/got.s
index 30114b75d84da..3fe3e4a62b1a5 100644
--- a/test/MC/ELF/got.s
+++ b/test/MC/ELF/got.s
@@ -1,7 +1,7 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r -t | FileCheck %s
-// Test that this produces a R_X86_64_GOT32 and that we have an undefined
-// reference to _GLOBAL_OFFSET_TABLE_.
+// Test that this produces the correct R_X86_64_GOT32 relocation and that we,
+// unlike gas, don't create a _GLOBAL_OFFSET_TABLE_ symbol as a side effect.
movl foo@GOT, %eax
movl foo@GOTPCREL(%rip), %eax
@@ -13,8 +13,5 @@
// CHECK-NEXT: }
// CHECK-NEXT: ]
-// CHECK: Symbol {
-// CHECK: Name: _GLOBAL_OFFSET_TABLE_
-// CHECK-NEXT: Value:
-// CHECK-NEXT: Size:
-// CHECK-NEXT: Binding: Global
+// CHECK: Symbols [
+// CHECK-NOT: _GLOBAL_OFFSET_TABLE_
diff --git a/test/MC/ELF/many-sections-2.s b/test/MC/ELF/many-sections-2.s
index b52c01af49050..0077552ef3130 100644
--- a/test/MC/ELF/many-sections-2.s
+++ b/test/MC/ELF/many-sections-2.s
@@ -32,13 +32,13 @@
// Test that this file has one section too many.
-// SYMBOLS: Name: dm
-// SYMBOLS-NEXT: Value: 0x0
-// SYMBOLS-NEXT: Size: 0
-// SYMBOLS-NEXT: Binding: Local (0x0)
-// SYMBOLS-NEXT: Type: Section (0x3)
-// SYMBOLS-NEXT: Other: 0
-// SYMBOLS-NEXT: Section: dm (0xFF00)
+// SYMBOLS: Name: (0)
+// SYMBOLS: Value: 0x0
+// SYMBOLS: Size: 0
+// SYMBOLS: Binding: Local (0x0)
+// SYMBOLS: Type: Section (0x3)
+// SYMBOLS: Other: 0
+// SYMBOLS: Section: dm (0xFF00)
// SYMBOLS-NEXT: }
// SYMBOLS-NEXT:]
@@ -130,3 +130,4 @@ gen_sections4 m
a:
b = a + 1
+.long dm
diff --git a/test/MC/ELF/many-sections.s b/test/MC/ELF/many-sections.s
index dbba8711fdf67..2db6abb9321bf 100644
--- a/test/MC/ELF/many-sections.s
+++ b/test/MC/ELF/many-sections.s
@@ -9,13 +9,13 @@
// Check the last referenced section.
-// SYMBOLS: Name: zed
-// SYMBOLS-NEXT: Value: 0x0
-// SYMBOLS-NEXT: Size: 0
-// SYMBOLS-NEXT: Binding: Local (0x0)
-// SYMBOLS-NEXT: Type: Section (0x3)
-// SYMBOLS-NEXT: Other: 0
-// SYMBOLS-NEXT: Section: zed (0xFEFF)
+// SYMBOLS: Name: (0)
+// SYMBOLS: Value: 0x0
+// SYMBOLS: Size: 0
+// SYMBOLS: Binding: Local (0x0)
+// SYMBOLS: Type: Section (0x3)
+// SYMBOLS: Other: 0
+// SYMBOLS: Section: zed (0xFEFF)
// SYMBOLS-NEXT: }
// SYMBOLS-NEXT:]
@@ -106,3 +106,4 @@ gen_sections8 l
.section foo
.section bar
.section zed
+.long zed
diff --git a/test/MC/ELF/noexec.s b/test/MC/ELF/noexec.s
index 3769427a3b1ab..e8f460e65fa0a 100644
--- a/test/MC/ELF/noexec.s
+++ b/test/MC/ELF/noexec.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -no-exec-stack -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s -t | FileCheck %s
+// RUN: llvm-mc -no-exec-stack -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s | FileCheck %s
// CHECK: Section {
// CHECK: Index:
@@ -14,13 +14,3 @@
// CHECK-NEXT: AddressAlignment: 1
// CHECK-NEXT: EntrySize: 0
// CHECK-NEXT: }
-
-// CHECK: Symbol {
-// CHECK: Name: .note.GNU-stack (0)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .note.GNU-stack
-// CHECK-NEXT: }
diff --git a/test/MC/ELF/popsection.s b/test/MC/ELF/popsection.s
new file mode 100644
index 0000000000000..19f55688a1b20
--- /dev/null
+++ b/test/MC/ELF/popsection.s
@@ -0,0 +1,21 @@
+// RUN: llvm-mc -filetype=obj %s -o - -triple x86_64-pc-linux | llvm-readobj -s - | FileCheck %s
+
+// This used to crash. Test that it creates an empty section instead.
+
+ .pushsection foo
+ .popsection
+
+// CHECK: Section {
+// CHECK: Index: 5
+// CHECK-NEXT: Name: foo
+// CHECK-NEXT: Type: SHT_PROGBITS
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Address: 0x0
+// CHECK-NEXT: Offset:
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Link: 0
+// CHECK-NEXT: Info: 0
+// CHECK-NEXT: AddressAlignment: 1
+// CHECK-NEXT: EntrySize: 0
+// CHECK-NEXT: }
diff --git a/test/MC/ELF/reloc-same-name-section.s b/test/MC/ELF/reloc-same-name-section.s
index e63ea548e5aa8..57463c65f8909 100644
--- a/test/MC/ELF/reloc-same-name-section.s
+++ b/test/MC/ELF/reloc-same-name-section.s
@@ -7,13 +7,13 @@
// CHECK-NEXT: Relocation {
// CHECK-NEXT: Offset:
// CHECK-NEXT: Type:
-// CHECK-NEXT: Symbol: .foo (7)
+// CHECK-NEXT: Symbol: .foo (4)
// CHECK-NEXT: Addend:
// CHECK-NEXT: }
// CHECK-NEXT: Relocation {
// CHECK-NEXT: Offset:
// CHECK-NEXT: Type:
-// CHECK-NEXT: Symbol: .foo (8)
+// CHECK-NEXT: Symbol: .foo (5)
// CHECK-NEXT: Addend:
// CHECK-NEXT: }
// CHECK-NEXT: }
diff --git a/test/MC/ELF/relocation-386.s b/test/MC/ELF/relocation-386.s
index 2af6addc3d2b2..6b7e02f03ea0a 100644
--- a/test/MC/ELF/relocation-386.s
+++ b/test/MC/ELF/relocation-386.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - | llvm-readobj -r -t | FileCheck %s
+// RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - | llvm-readobj -r | FileCheck %s
// Test that we produce the correct relocation types and that the relocations
// correctly point to the section or the symbol.
@@ -71,27 +71,6 @@
// CHECK-NEXT: }
// CHECK-NEXT: ]
-// Symbol 4 is zed
-// CHECK: Symbol {
-// CHECK: Name: zed
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: TLS
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: zedsec
-// CHECK-NEXT: }
-// Symbol 7 is section 4
-// CHECK: Symbol {
-// CHECK: Name: .bss (0)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss
-// CHECK-NEXT: }
-
.text
bar:
leal .Lfoo@GOTOFF(%ebx), %eax
diff --git a/test/MC/ELF/relocation.s b/test/MC/ELF/relocation.s
index 2841612f2e112..34f1a4038131b 100644
--- a/test/MC/ELF/relocation.s
+++ b/test/MC/ELF/relocation.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s -sr -t | FileCheck %s
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s -sr | FileCheck %s
// Test that we produce the correct relocation.
@@ -54,6 +54,10 @@ bar:
.quad pr23272_2 - pr23272
.quad pr23272_3 - pr23272
+
+ .code16
+ call pr23771
+
// CHECK: Section {
// CHECK: Name: .rela.text
// CHECK: Relocations [
@@ -90,15 +94,6 @@ bar:
// CHECK-NEXT: 0xD4 R_X86_64_SIZE32 blah 0xFFFFFFFFFFFFFFE0
// CHECK-NEXT: 0xD8 R_X86_64_GOTPCREL foo 0x0
// CHECK-NEXT: 0xDC R_X86_64_PLT32 foo 0x0
+// CHECK-NEXT: 0xF1 R_X86_64_PC16 pr23771 0xFFFFFFFFFFFFFFFE
// CHECK-NEXT: ]
// CHECK-NEXT: }
-
-// CHECK: Symbol {
-// CHECK: Name: .text (0)
-// CHECK-NEXT: Value:
-// CHECK-NEXT: Size:
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text
-// CHECK-NEXT: }
diff --git a/test/MC/ELF/section-sym.s b/test/MC/ELF/section-sym.s
index f012b2f1bed71..4a9484d9b7792 100644
--- a/test/MC/ELF/section-sym.s
+++ b/test/MC/ELF/section-sym.s
@@ -40,52 +40,54 @@
// CHECK-NEXT: EntrySize: 0
// CHECK-NEXT: }
-// The relocation points to symbol 6
+// The relocation points to symbol 3
// CHECK: Relocations [
// CHECK-NEXT: Section ({{.*}}) .relabar {
// CHECK-NEXT: Relocation {
// CHECK-NEXT: Offset: 0x0
// CHECK-NEXT: Type: R_X86_64_32 (10)
-// CHECK-NEXT: Symbol: foo (6)
+// CHECK-NEXT: Symbol: foo (3)
// CHECK-NEXT: Addend: 0x0
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ]
-
-// The symbol 6 corresponds section 6
+// Symbol 3 is section 6
// CHECK: Symbols [
-
-// symbol 0
-// CHECK-NOT: Name
-// CHECK: Name:
-
-// symbol 1
-// CHECK-NOT: Name
-// CHECK: Name: f1
-
-// symbol 2
-// CHECK-NOT: Name
-// CHECK: Name: f2
-
-// symbol 3
-// CHECK-NOT: Name
-// CHECK: Name: .text
-
-// symbol 4
-// CHECK-NOT: Name
-// CHECK: Name: .data
-
-// symbol 5
-// CHECK-NOT: Name
-// CHECK: Name: .bss
-
-// symbol 6
-// CHECK-NOT: Name
-// CHECK: Name: foo
-// CHECK: Section: foo (0x6)
-
-// symbol 7
-// CHECK-NOT: Name
-// CHECK: Name: foo
-// CHECK: Section: foo (0x8)
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: (0)
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: None (0x0)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Undefined (0x0)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: f1 (57)
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: None (0x0)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .group (0x5)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: f2 (54)
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: None (0x0)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .group (0x7)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: (0)
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Section (0x3)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: foo (0x6)
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
diff --git a/test/MC/ELF/section-sym2.s b/test/MC/ELF/section-sym2.s
index f62e3f9f8a304..ca3863223dbeb 100644
--- a/test/MC/ELF/section-sym2.s
+++ b/test/MC/ELF/section-sym2.s
@@ -19,10 +19,6 @@ mov .rodata, %rsi
// There is only one .rodata symbol
// CHECK:Symbols [
-// CHECK-NOT: Name: .rodata
-// CHECK: Name: .rodata
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local (0x0)
-// CHECK-NEXT: Type: Section (0x3)
-// CHECK-NOT: Name: .rodata
+// CHECK: Type: Section (0x3)
+// CHECK: Section: .rodata
+// CHECK-NOT: Section: .rodata
diff --git a/test/MC/ELF/strtab-suffix-opt.s b/test/MC/ELF/strtab-suffix-opt.s
index 0524656446037..96d15005c6185 100644
--- a/test/MC/ELF/strtab-suffix-opt.s
+++ b/test/MC/ELF/strtab-suffix-opt.s
@@ -16,6 +16,6 @@ foobar:
.Ltmp3:
.size foobar, .Ltmp3-foobar
-// CHECK: Name: foobar (16)
// CHECK: Name: bar (19)
// CHECK: Name: foo (23)
+// CHECK: Name: foobar (16)
diff --git a/test/MC/ELF/symver.s b/test/MC/ELF/symver.s
index 80d71fd0e0692..0f434dd859d38 100644
--- a/test/MC/ELF/symver.s
+++ b/test/MC/ELF/symver.s
@@ -77,7 +77,7 @@ global1:
// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .text
+// CHECK-NEXT: Name: (0)
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -86,22 +86,22 @@ global1:
// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .data
+// CHECK-NEXT: Name: bar2@zed
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .data
+// CHECK-NEXT: Section: Undefined
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .bss
+// CHECK-NEXT: Name: bar6@zed
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss
+// CHECK-NEXT: Section: Undefined
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: g1@@zed
@@ -121,22 +121,4 @@ global1:
// CHECK-NEXT: Other: 0
// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar2@zed
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar6@zed
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined
-// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/ELF/undef.s b/test/MC/ELF/undef.s
index 245b56328248b..9577ea22875b1 100644
--- a/test/MC/ELF/undef.s
+++ b/test/MC/ELF/undef.s
@@ -42,42 +42,6 @@ test2_b = undef + 1
// CHECK-NEXT: Section: .rodata.str1.1
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .text
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .data
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .data
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .bss
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .rodata.str1.1
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .rodata.str1.1
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: .Lsym1
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
diff --git a/test/MC/ELF/weakref-reloc.s b/test/MC/ELF/weakref-reloc.s
index 484167f0434ea..baf80060c4336 100644
--- a/test/MC/ELF/weakref-reloc.s
+++ b/test/MC/ELF/weakref-reloc.s
@@ -1,7 +1,6 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r -t | FileCheck %s
-// Test that the relocations point to the correct symbols. We used to get the
-// symbol index wrong for weakrefs when creating _GLOBAL_OFFSET_TABLE_.
+// Test that the relocations point to the correct symbols.
.weakref bar,foo
call zed@PLT
@@ -13,32 +12,3 @@
// CHECK-NEXT: 0x6 R_X86_64_PC32 foo 0xFFFFFFFFFFFFFFFC
// CHECK-NEXT: }
// CHECK-NEXT: ]
-
-// CHECK: Symbols [
-// CHECK: Symbol {
-// CHECK: Name: _GLOBAL_OFFSET_TABLE_
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined (0x0)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Weak
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined (0x0)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: zed
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined (0x0)
-// CHECK-NEXT: }
diff --git a/test/MC/ELF/weakref.s b/test/MC/ELF/weakref.s
index d263af31a0df6..d342c61f745ab 100644
--- a/test/MC/ELF/weakref.s
+++ b/test/MC/ELF/weakref.s
@@ -116,7 +116,7 @@ bar15:
// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .text
+// CHECK-NEXT: Name: (0)
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -125,24 +125,6 @@ bar15:
// CHECK-NEXT: Section: .text
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .data
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .data
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .bss
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: bar10
// CHECK-NEXT: Value: 0x28
// CHECK-NEXT: Size: 0
diff --git a/test/MC/Hexagon/inst_add.ll b/test/MC/Hexagon/inst_add.ll
index 20a7b312ed6db..826776952e050 100644
--- a/test/MC/Hexagon/inst_add.ll
+++ b/test/MC/Hexagon/inst_add.ll
@@ -1,5 +1,5 @@
-;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s
define i32 @foo (i32 %a, i32 %b)
{
@@ -7,4 +7,4 @@ define i32 @foo (i32 %a, i32 %b)
ret i32 %1
}
-; CHECK: 0000 004100f3 00c09f52
+; CHECK: c0 3f 10 58 58103fc0
diff --git a/test/MC/Hexagon/inst_cmp_eq.ll b/test/MC/Hexagon/inst_cmp_eq.ll
index 113db631fa92f..98202368aff35 100644
--- a/test/MC/Hexagon/inst_cmp_eq.ll
+++ b/test/MC/Hexagon/inst_cmp_eq.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a, i32 %b)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: 0000 004100f2 00404089 00c09f52
+; CHECK: p0 = cmp.eq(r0, r1)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_eqi.ll b/test/MC/Hexagon/inst_cmp_eqi.ll
index 70c4c308e1f4c..612dfdc8f23da 100644
--- a/test/MC/Hexagon/inst_cmp_eqi.ll
+++ b/test/MC/Hexagon/inst_cmp_eqi.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a)
ret i1 %1
}
-; CHECK: 0000 40450075 00404089 00c09f52
+; CHECK: p0 = cmp.eq(r0, #42)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_gt.ll b/test/MC/Hexagon/inst_cmp_gt.ll
index 85fedbfb50347..3ce1c0addad7a 100644
--- a/test/MC/Hexagon/inst_cmp_gt.ll
+++ b/test/MC/Hexagon/inst_cmp_gt.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a, i32 %b)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: 0000 004140f2 00404089 00c09f52
+; CHECK: p0 = cmp.gt(r0, r1)
+; CHECK: r0 = p0
+; CHECK: jumpr r31 }
\ No newline at end of file
diff --git a/test/MC/Hexagon/inst_cmp_gti.ll b/test/MC/Hexagon/inst_cmp_gti.ll
index 18ba3e463ef7f..f3c13a2fb96e6 100644
--- a/test/MC/Hexagon/inst_cmp_gti.ll
+++ b/test/MC/Hexagon/inst_cmp_gti.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a)
ret i1 %1
}
-; CHECK: 0000 40454075 00404089 00c09f52
+; CHECK: p0 = cmp.gt(r0, #42)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_lt.ll b/test/MC/Hexagon/inst_cmp_lt.ll
index 3a7618421e3d8..80ba16f41418c 100644
--- a/test/MC/Hexagon/inst_cmp_lt.ll
+++ b/test/MC/Hexagon/inst_cmp_lt.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a, i32 %b)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: 0000 004041f2 00404089 00c09f52
+; CHECK: p0 = cmp.gt(r1, r0)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_ugt.ll b/test/MC/Hexagon/inst_cmp_ugt.ll
index 096536f54a7a5..07fa784dc64ad 100644
--- a/test/MC/Hexagon/inst_cmp_ugt.ll
+++ b/test/MC/Hexagon/inst_cmp_ugt.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a, i32 %b)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: 0000 004160f2 00404089 00c09f52
+; CHECK: p0 = cmp.gtu(r0, r1)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_ugti.ll b/test/MC/Hexagon/inst_cmp_ugti.ll
index a83583457d136..59db552b39f4d 100644
--- a/test/MC/Hexagon/inst_cmp_ugti.ll
+++ b/test/MC/Hexagon/inst_cmp_ugti.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a)
ret i1 %1
}
-; CHECK: 0000 40458075 00404089 00c09f52
+; CHECK: p0 = cmp.gtu(r0, #42)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_ult.ll b/test/MC/Hexagon/inst_cmp_ult.ll
index 4323fa0834d67..c880ac8a229c9 100644
--- a/test/MC/Hexagon/inst_cmp_ult.ll
+++ b/test/MC/Hexagon/inst_cmp_ult.ll
@@ -1,5 +1,5 @@
;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+;; RUN: | llvm-objdump -d - | FileCheck %s
define i1 @foo (i32 %a, i32 %b)
{
@@ -7,4 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: 0000 004061f2 00404089 00c09f52
+; CHECK: p0 = cmp.gtu(r1, r0)
+; CHECK: r0 = p0
+; CHECK: jumpr r31
\ No newline at end of file
diff --git a/test/MC/Hexagon/inst_select.ll b/test/MC/Hexagon/inst_select.ll
index 29a2db0723bd2..9d12c1de73fef 100644
--- a/test/MC/Hexagon/inst_select.ll
+++ b/test/MC/Hexagon/inst_select.ll
@@ -1,5 +1,5 @@
-;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s
define i32 @foo (i1 %a, i32 %b, i32 %c)
{
@@ -7,4 +7,7 @@ define i32 @foo (i1 %a, i32 %b, i32 %c)
ret i32 %1
}
-; CHECK: 0000 00400085 00600174 00608274 00c09f52
+; CHECK: 00 40 00 85 85004000
+; CHECK: 00 40 9f 52 529f4000
+; CHECK: 00 60 01 74 74016000
+; CHECK: 00 e0 82 74 7482e000
\ No newline at end of file
diff --git a/test/MC/Hexagon/inst_sxtb.ll b/test/MC/Hexagon/inst_sxtb.ll
index 4a217420a9607..34219c72c99ee 100644
--- a/test/MC/Hexagon/inst_sxtb.ll
+++ b/test/MC/Hexagon/inst_sxtb.ll
@@ -1,5 +1,5 @@
-;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s
define i32 @foo (i8 %a)
{
@@ -7,4 +7,4 @@ define i32 @foo (i8 %a)
ret i32 %1
}
-; CHECK: 0000 0040a070 00c09f52
+; CHECK: c0 3f 00 55 55003fc0
diff --git a/test/MC/Hexagon/inst_sxth.ll b/test/MC/Hexagon/inst_sxth.ll
index f0bcf584efa9b..5d1223da8e08f 100644
--- a/test/MC/Hexagon/inst_sxth.ll
+++ b/test/MC/Hexagon/inst_sxth.ll
@@ -1,5 +1,5 @@
-;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s
define i32 @foo (i16 %a)
{
@@ -7,4 +7,4 @@ define i32 @foo (i16 %a)
ret i32 %1
}
-; CHECK: 0000 0040e070 00c09f52
+; CHECK: c0 3f 00 54 54003fc0
\ No newline at end of file
diff --git a/test/MC/Hexagon/inst_zxtb.ll b/test/MC/Hexagon/inst_zxtb.ll
index 622c03692b2f4..86da5e52cac1d 100644
--- a/test/MC/Hexagon/inst_zxtb.ll
+++ b/test/MC/Hexagon/inst_zxtb.ll
@@ -1,5 +1,5 @@
-;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s
define i32 @foo (i8 %a)
{
@@ -7,4 +7,4 @@ define i32 @foo (i8 %a)
ret i32 %1
}
-; CHECK: 0000 e05f0076 00c09f52
+; CHECK: c0 3f 00 57 57003fc0
diff --git a/test/MC/Hexagon/inst_zxth.ll b/test/MC/Hexagon/inst_zxth.ll
index 962210b17511c..7b3d1b30648c1 100644
--- a/test/MC/Hexagon/inst_zxth.ll
+++ b/test/MC/Hexagon/inst_zxth.ll
@@ -1,5 +1,5 @@
-;; RUN: llc -mtriple=hexagon-unknown-elf -filetype=obj %s -o - \
-;; RUN: | llvm-objdump -s - | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s
define i32 @foo (i16 %a)
{
@@ -7,4 +7,4 @@ define i32 @foo (i16 %a)
ret i32 %1
}
-; CHECK: 0000 0040c070 00c09f52
+; CHECK: c0 3f 00 56 56003fc0
diff --git a/test/MC/MachO/absolutize.s b/test/MC/MachO/absolutize.s
index 19917e3bae1c0..8947c0f65e536 100644
--- a/test/MC/MachO/absolutize.s
+++ b/test/MC/MachO/absolutize.s
@@ -6,9 +6,9 @@ _text_b:
xorl %eax,%eax
Ltext_c:
xorl %eax,%eax
-Ltext_d:
+Ltext_d:
xorl %eax,%eax
-
+
movl $(_text_a - _text_b), %eax
Ltext_expr_0 = _text_a - _text_b
movl $(Ltext_expr_0), %eax
@@ -30,9 +30,9 @@ _data_b:
.long 0
Ldata_c:
.long 0
-Ldata_d:
+Ldata_d:
.long 0
-
+
.long _data_a - _data_b
Ldata_expr_0 = _data_a - _data_b
.long Ldata_expr_0
@@ -75,7 +75,7 @@ Ldata_expr_2 = Ldata_d - Ldata_c
// CHECK: ('offset', 324)
// CHECK: ('alignment', 0)
// CHECK: ('reloc_offset', 412)
-// CHECK: ('num_reloc', 7)
+// CHECK: ('num_reloc', 3)
// CHECK: ('flags', 0x80000400)
// CHECK: ('reserved1', 0)
// CHECK: ('reserved2', 0)
@@ -85,21 +85,9 @@ Ldata_expr_2 = Ldata_d - Ldata_c
// CHECK: (('word-0', 0xa0000027),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 1
-// CHECK: (('word-0', 0xa400001d),
-// CHECK: ('word-1', 0x6)),
-// CHECK: # Relocation 2
-// CHECK: (('word-0', 0xa1000000),
-// CHECK: ('word-1', 0x4)),
-// CHECK: # Relocation 3
-// CHECK: (('word-0', 0xa4000013),
-// CHECK: ('word-1', 0x4)),
-// CHECK: # Relocation 4
-// CHECK: (('word-0', 0xa1000000),
-// CHECK: ('word-1', 0x2)),
-// CHECK: # Relocation 5
// CHECK: (('word-0', 0xa4000009),
// CHECK: ('word-1', 0x0)),
-// CHECK: # Relocation 6
+// CHECK: # Relocation 2
// CHECK: (('word-0', 0xa1000000),
// CHECK: ('word-1', 0x2)),
// CHECK: ])
@@ -111,8 +99,8 @@ Ldata_expr_2 = Ldata_d - Ldata_c
// CHECK: ('size', 44)
// CHECK: ('offset', 367)
// CHECK: ('alignment', 0)
-// CHECK: ('reloc_offset', 468)
-// CHECK: ('num_reloc', 7)
+// CHECK: ('reloc_offset', 436)
+// CHECK: ('num_reloc', 3)
// CHECK: ('flags', 0x0)
// CHECK: ('reserved1', 0)
// CHECK: ('reserved2', 0)
@@ -122,21 +110,9 @@ Ldata_expr_2 = Ldata_d - Ldata_c
// CHECK: (('word-0', 0xa0000028),
// CHECK: ('word-1', 0x2b)),
// CHECK: # Relocation 1
-// CHECK: (('word-0', 0xa4000020),
-// CHECK: ('word-1', 0x37)),
-// CHECK: # Relocation 2
-// CHECK: (('word-0', 0xa1000000),
-// CHECK: ('word-1', 0x33)),
-// CHECK: # Relocation 3
-// CHECK: (('word-0', 0xa4000018),
-// CHECK: ('word-1', 0x33)),
-// CHECK: # Relocation 4
-// CHECK: (('word-0', 0xa1000000),
-// CHECK: ('word-1', 0x2f)),
-// CHECK: # Relocation 5
// CHECK: (('word-0', 0xa4000010),
// CHECK: ('word-1', 0x2b)),
-// CHECK: # Relocation 6
+// CHECK: # Relocation 2
// CHECK: (('word-0', 0xa1000000),
// CHECK: ('word-1', 0x2f)),
// CHECK: ])
@@ -146,9 +122,9 @@ Ldata_expr_2 = Ldata_d - Ldata_c
// CHECK: # Load Command 1
// CHECK: (('command', 2)
// CHECK: ('size', 24)
-// CHECK: ('symoff', 524)
+// CHECK: ('symoff', 460)
// CHECK: ('nsyms', 4)
-// CHECK: ('stroff', 572)
+// CHECK: ('stroff', 508)
// CHECK: ('strsize', 36)
// CHECK: ('_string_data', '\x00_text_b\x00_data_b\x00_text_a\x00_data_a\x00\x00\x00\x00')
// CHECK: ('_symbols', [
diff --git a/test/MC/MachO/reloc-diff.s b/test/MC/MachO/reloc-diff.s
index 601edba47b42b..a63a413f24b61 100644
--- a/test/MC/MachO/reloc-diff.s
+++ b/test/MC/MachO/reloc-diff.s
@@ -1,39 +1,27 @@
// RUN: llvm-mc -triple i386-apple-darwin9 %s -filetype=obj -o - | macho-dump | FileCheck %s
// CHECK: # Relocation 0
-// CHECK: (('word-0', 0xa2000014),
+// CHECK: (('word-0', 0xa4000010),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 1
// CHECK: (('word-0', 0xa1000000),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 2
-// CHECK: (('word-0', 0xa4000010),
+// CHECK: (('word-0', 0xa4000008),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 3
// CHECK: (('word-0', 0xa1000000),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 4
-// CHECK: (('word-0', 0xa400000c),
+// CHECK: (('word-0', 0xa4000004),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 5
// CHECK: (('word-0', 0xa1000000),
// CHECK: ('word-1', 0x0)),
// CHECK: # Relocation 6
-// CHECK: (('word-0', 0xa4000008),
-// CHECK: ('word-1', 0x0)),
-// CHECK: # Relocation 7
-// CHECK: (('word-0', 0xa1000000),
-// CHECK: ('word-1', 0x0)),
-// CHECK: # Relocation 8
-// CHECK: (('word-0', 0xa4000004),
-// CHECK: ('word-1', 0x0)),
-// CHECK: # Relocation 9
-// CHECK: (('word-0', 0xa1000000),
-// CHECK: ('word-1', 0x0)),
-// CHECK: # Relocation 10
// CHECK: (('word-0', 0xa2000000),
// CHECK: ('word-1', 0x0)),
-// CHECK: # Relocation 11
+// CHECK: # Relocation 7
// CHECK: (('word-0', 0xa1000000),
// CHECK: ('word-1', 0x0)),
// CHECK-NEXT: ])
@@ -43,7 +31,7 @@ _local_def:
_external_def:
Ltemp:
ret
-
+
.data
.long _external_def - _local_def
.long Ltemp - _local_def
diff --git a/test/MC/Mips/mips-expansions.s b/test/MC/Mips/mips-expansions.s
index b6dc32ed5e34e..d3fdf39ff8b0f 100644
--- a/test/MC/Mips/mips-expansions.s
+++ b/test/MC/Mips/mips-expansions.s
@@ -1,86 +1,85 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 | FileCheck %s
-# Check that the assembler can handle the documented syntax
-# for macro instructions
-#------------------------------------------------------------------------------
-# Load immediate instructions
-#------------------------------------------------------------------------------
+# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 | \
+# RUN: FileCheck %s
+
+# Check that the IAS expands macro instructions in the same way as GAS.
+
+# Load immediate, done by MipsAsmParser::expandLoadImm():
+ li $5, 123
# CHECK: ori $5, $zero, 123 # encoding: [0x7b,0x00,0x05,0x34]
+ li $6, -2345
# CHECK: addiu $6, $zero, -2345 # encoding: [0xd7,0xf6,0x06,0x24]
+ li $7, 65538
# CHECK: lui $7, 1 # encoding: [0x01,0x00,0x07,0x3c]
# CHECK: ori $7, $7, 2 # encoding: [0x02,0x00,0xe7,0x34]
+ li $8, ~7
# CHECK: addiu $8, $zero, -8 # encoding: [0xf8,0xff,0x08,0x24]
+ li $9, 0x10000
# CHECK: lui $9, 1 # encoding: [0x01,0x00,0x09,0x3c]
# CHECK-NOT: ori $9, $9, 0 # encoding: [0x00,0x00,0x29,0x35]
+ li $10, ~(0x101010)
# CHECK: lui $10, 65519 # encoding: [0xef,0xff,0x0a,0x3c]
# CHECK: ori $10, $10, 61423 # encoding: [0xef,0xef,0x4a,0x35]
+# Load address, done by MipsAsmParser::expandLoadAddressReg()
+# and MipsAsmParser::expandLoadAddressImm():
+ la $4, 20
# CHECK: ori $4, $zero, 20 # encoding: [0x14,0x00,0x04,0x34]
+ la $7, 65538
# CHECK: lui $7, 1 # encoding: [0x01,0x00,0x07,0x3c]
# CHECK: ori $7, $7, 2 # encoding: [0x02,0x00,0xe7,0x34]
+ la $4, 20($5)
# CHECK: ori $4, $5, 20 # encoding: [0x14,0x00,0xa4,0x34]
+ la $7, 65538($8)
# CHECK: lui $7, 1 # encoding: [0x01,0x00,0x07,0x3c]
# CHECK: ori $7, $7, 2 # encoding: [0x02,0x00,0xe7,0x34]
# CHECK: addu $7, $7, $8 # encoding: [0x21,0x38,0xe8,0x00]
+ la $8, symbol
# CHECK: lui $8, %hi(symbol) # encoding: [A,A,0x08,0x3c]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_HI, kind: fixup_Mips_HI16
# CHECK: ori $8, $8, %lo(symbol) # encoding: [A,A,0x08,0x35]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_LO, kind: fixup_Mips_LO16
+
+# LW/SW and LDC1/SDC1 of symbol address, done by MipsAsmParser::expandMemInst():
+ .set noat
+ lw $10, symbol($4)
# CHECK: lui $10, %hi(symbol) # encoding: [A,A,0x0a,0x3c]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_HI, kind: fixup_Mips_HI16
# CHECK: addu $10, $10, $4 # encoding: [0x21,0x50,0x44,0x01]
# CHECK: lw $10, %lo(symbol)($10) # encoding: [A,A,0x4a,0x8d]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_LO, kind: fixup_Mips_LO16
+ .set at
+ sw $10, symbol($9)
# CHECK: lui $1, %hi(symbol) # encoding: [A,A,0x01,0x3c]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_HI, kind: fixup_Mips_HI16
# CHECK: addu $1, $1, $9 # encoding: [0x21,0x08,0x29,0x00]
# CHECK: sw $10, %lo(symbol)($1) # encoding: [A,A,0x2a,0xac]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_LO, kind: fixup_Mips_LO16
+
+ lw $10, 655483($4)
# CHECK: lui $10, 10 # encoding: [0x0a,0x00,0x0a,0x3c]
# CHECK: addu $10, $10, $4 # encoding: [0x21,0x50,0x44,0x01]
# CHECK: lw $10, 123($10) # encoding: [0x7b,0x00,0x4a,0x8d]
+ sw $10, 123456($9)
# CHECK: lui $1, 2 # encoding: [0x02,0x00,0x01,0x3c]
# CHECK: addu $1, $1, $9 # encoding: [0x21,0x08,0x29,0x00]
# CHECK: sw $10, 57920($1) # encoding: [0x40,0xe2,0x2a,0xac]
+ lw $8, symbol
# CHECK: lui $8, %hi(symbol) # encoding: [A,A,0x08,0x3c]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_HI, kind: fixup_Mips_HI16
# CHECK-NOT: move $8, $8 # encoding: [0x21,0x40,0x00,0x01]
# CHECK: lw $8, %lo(symbol)($8) # encoding: [A,A,0x08,0x8d]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_LO, kind: fixup_Mips_LO16
+ sw $8, symbol
# CHECK: lui $1, %hi(symbol) # encoding: [A,A,0x01,0x3c]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_HI, kind: fixup_Mips_HI16
# CHECK-NOT: move $1, $1 # encoding: [0x21,0x08,0x20,0x00]
# CHECK: sw $8, %lo(symbol)($1) # encoding: [A,A,0x28,0xac]
# CHECK: # fixup A - offset: 0, value: symbol@ABS_LO, kind: fixup_Mips_LO16
+ ldc1 $f0, symbol
# CHECK: lui $1, %hi(symbol)
# CHECK: ldc1 $f0, %lo(symbol)($1)
+ sdc1 $f0, symbol
# CHECK: lui $1, %hi(symbol)
# CHECK: sdc1 $f0, %lo(symbol)($1)
-
- li $5,123
- li $6,-2345
- li $7,65538
- li $8, ~7
- li $9, 0x10000
- li $10, ~(0x101010)
-
- la $a0, 20
- la $7,65538
- la $a0, 20($a1)
- la $7,65538($8)
- la $t0, symbol
-
- .set noat
- lw $t2, symbol($a0)
- .set at
- sw $t2, symbol($t1)
-
- lw $t2, 655483($a0)
- sw $t2, 123456($t1)
-
- lw $8, symbol
- sw $8, symbol
-
- ldc1 $f0, symbol
- sdc1 $f0, symbol
diff --git a/test/MC/Mips/mips-pdr.s b/test/MC/Mips/mips-pdr.s
index 372c259bb098f..79e824bda64b5 100644
--- a/test/MC/Mips/mips-pdr.s
+++ b/test/MC/Mips/mips-pdr.s
@@ -2,7 +2,7 @@
# RUN: FileCheck %s -check-prefix=ASMOUT
# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -filetype=obj -o - | \
-# RUN: llvm-readobj -s -section-data | \
+# RUN: llvm-readobj -s -section-data -r | \
# RUN: FileCheck %s -check-prefix=OBJOUT
# ASMOUT: .text
@@ -32,16 +32,12 @@
# OBJOUT: }
# We should also check if relocation information was correctly generated.
-# OBJOUT: Section {
-# OBJOUT: Name: .rel.pdr
-# OBJOUT: Type: SHT_REL (0x9)
-# OBJOUT: Flags [ (0x0)
-# OBJOUT: ]
-# OBJOUT: Size: 16
-# OBJOUT: SectionData (
-# OBJOUT: 0000: 00000000 00000202 00000020 00000802
-# OBJOUT: )
-# OBJOUT: }
+# OBJOUT: Relocations [
+# OBJOUT-NEXT: Section (6) .rel.pdr {
+# OBJOUT-NEXT: 0x0 R_MIPS_32 .text 0x0
+# OBJOUT-NEXT: 0x20 R_MIPS_32 _global_foo 0x0
+# OBJOUT-NEXT: }
+# OBJOUT-NEXT: ]
.text
.type _local_foo,@function
diff --git a/test/MC/Mips/mips-relocations.s b/test/MC/Mips/mips-relocations.s
index 6f095d1ecdffe..13cea2f385688 100644
--- a/test/MC/Mips/mips-relocations.s
+++ b/test/MC/Mips/mips-relocations.s
@@ -11,8 +11,8 @@
# CHECK: # fixup A - offset: 0, value: loop_1@GOT, kind: fixup_Mips_GOT_Local
# CHECK: lui $2, %dtprel_hi(_gp_disp) # encoding: [A,A,0x02,0x3c]
# CHECK: # fixup A - offset: 0, value: _gp_disp@DTPREL_HI, kind: fixup_Mips_DTPREL_HI
-# CHECK: addiu $2, $2, %dtprel_hi(_gp_disp) # encoding: [A,A,0x42,0x24]
-# CHECK: # fixup A - offset: 0, value: _gp_disp@DTPREL_HI, kind: fixup_Mips_DTPREL_HI
+# CHECK: addiu $2, $2, %dtprel_lo(_gp_disp) # encoding: [A,A,0x42,0x24]
+# CHECK: # fixup A - offset: 0, value: _gp_disp@DTPREL_LO, kind: fixup_Mips_DTPREL_LO
# CHECK: lw $3, %got(loop_1)($2) # encoding: [A,A,0x43,0x8c]
# CHECK: # fixup A - offset: 0, value: loop_1@GOT, kind: fixup_Mips_GOT_Local
# CHECK: lw $4, %got_disp(loop_2)($3) # encoding: [A,A,0x64,0x8c]
@@ -26,15 +26,15 @@
# CHECK: addiu $2, $2, %tprel_lo(_gp_disp) # encoding: [A,A,0x42,0x24]
# CHECK: # fixup A - offset: 0, value: _gp_disp@TPREL_LO, kind: fixup_Mips_TPREL_LO
- lui $2, %hi(_gp_disp)
- addiu $2, $2, %lo(_gp_disp)
- lw $25, %call16(strchr)($gp)
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ lw $25, %call16(strchr)($gp)
lw $3, %got(loop_1)($2)
- lui $2, %dtprel_hi(_gp_disp)
- addiu $2, $2, %dtprel_hi(_gp_disp)
- lw $3, %got(loop_1)($2)
- lw $4, %got_disp(loop_2)($3)
- lw $5, %got_page(loop_3)($4)
- lw $6, %got_ofst(loop_4)($5)
- lui $2, %tprel_hi(_gp_disp)
- addiu $2, $2, %tprel_lo(_gp_disp)
+ lui $2, %dtprel_hi(_gp_disp)
+ addiu $2, $2, %dtprel_lo(_gp_disp)
+ lw $3, %got(loop_1)($2)
+ lw $4, %got_disp(loop_2)($3)
+ lw $5, %got_page(loop_3)($4)
+ lw $6, %got_ofst(loop_4)($5)
+ lui $2, %tprel_hi(_gp_disp)
+ addiu $2, $2, %tprel_lo(_gp_disp)
diff --git a/test/MC/Mips/octeon-instructions.s b/test/MC/Mips/octeon-instructions.s
index 34830c0fc6eb2..cbb99088ea1a8 100644
--- a/test/MC/Mips/octeon-instructions.s
+++ b/test/MC/Mips/octeon-instructions.s
@@ -15,6 +15,8 @@
# CHECK: cins32 $22, $22, 9, 22 # encoding: [0x72,0xd6,0xb2,0x73]
# CHECK: cins32 $24, $ra, 0, 31 # encoding: [0x73,0xf8,0xf8,0x33]
# CHECK: cins32 $15, $15, 5, 5 # encoding: [0x71,0xef,0x29,0x73]
+# CHECK: dmtc2 $2, 16455 # encoding: [0x48,0xa2,0x40,0x47]
+# CHECK: dmfc2 $2, 64 # encoding: [0x48,0x22,0x00,0x40]
# CHECK: dmul $9, $6, $7 # encoding: [0x70,0xc7,0x48,0x03]
# CHECK: dmul $19, $24, $25 # encoding: [0x73,0x19,0x98,0x03]
# CHECK: dmul $9, $9, $6 # encoding: [0x71,0x26,0x48,0x03]
@@ -72,6 +74,8 @@ foo:
cins32 $22, 9, 22
cins $24, $31, 32, 31
cins $15, 37, 5
+ dmtc2 $2, 0x4047
+ dmfc2 $2, 0x0040
dmul $9, $6, $7
dmul $19, $24, $25
dmul $9, $6
diff --git a/test/MC/Mips/relocation.s b/test/MC/Mips/relocation.s
new file mode 100644
index 0000000000000..642b40960a657
--- /dev/null
+++ b/test/MC/Mips/relocation.s
@@ -0,0 +1,10 @@
+// RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux < %s | llvm-readobj -r | FileCheck %s
+
+// Test that we produce the correct relocation.
+// FIXME: move more relocation-only tests here.
+
+ .long foo
+// CHECK: R_MIPS_32 foo
+
+ .long foo-.
+// CHECK: R_MIPS_PC32 foo
diff --git a/test/MC/Mips/set-push-pop-directives-bad.s b/test/MC/Mips/set-push-pop-directives-bad.s
index 53d8b2308153f..8994eea1c8bbe 100644
--- a/test/MC/Mips/set-push-pop-directives-bad.s
+++ b/test/MC/Mips/set-push-pop-directives-bad.s
@@ -12,3 +12,12 @@
# CHECK: :[[@LINE-1]]:19: error: unexpected token, expected end of statement
.set pop bar
# CHECK: :[[@LINE-1]]:18: error: unexpected token, expected end of statement
+
+ .set hardfloat
+ .set push
+ .set softfloat
+ add.s $f2, $f2, $f2
+# CHECK: :[[@LINE-1]]:9: error: instruction requires a CPU feature not currently enabled
+ .set pop
+ add.s $f2, $f2, $f2
+# CHECK-NOT: :[[@LINE-1]]:9: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/set-push-pop-directives.s b/test/MC/Mips/set-push-pop-directives.s
index 5f55b7c7e4d30..3a0b2aecc5874 100644
--- a/test/MC/Mips/set-push-pop-directives.s
+++ b/test/MC/Mips/set-push-pop-directives.s
@@ -51,3 +51,20 @@
# CHECK: b 1336
# CHECK: nop
# CHECK: addvi.b $w15, $w13, 18
+
+ .set push
+ .set dsp
+ lbux $7, $10($11)
+ .set pop
+
+ .set push
+ .set dsp
+ lbux $7, $10($11)
+# CHECK-NOT: :[[@LINE-1]]:5: error: instruction requires a CPU feature not currently enabled
+ .set pop
+
+ .set push
+ .set dsp
+ lbux $7, $10($11)
+# CHECK-NOT: :[[@LINE-1]]:5: error: instruction requires a CPU feature not currently enabled
+ .set pop
diff --git a/test/MC/Mips/set-softfloat-hardfloat-bad.s b/test/MC/Mips/set-softfloat-hardfloat-bad.s
new file mode 100644
index 0000000000000..14b1e78afe176
--- /dev/null
+++ b/test/MC/Mips/set-softfloat-hardfloat-bad.s
@@ -0,0 +1,14 @@
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -mcpu=mips32 -mattr=+soft-float 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set hardfloat
+ add.s $f2, $f2, $f2
+ # CHECK-NOT: :[[@LINE-1]]:3: error: instruction requires a CPU feature not currently enabled
+ sub.s $f2, $f2, $f2
+ # CHECK-NOT: :[[@LINE-1]]:3: error: instruction requires a CPU feature not currently enabled
+
+ .set softfloat
+ add.s $f2, $f2, $f2
+ # CHECK: :[[@LINE-1]]:3: error: instruction requires a CPU feature not currently enabled
+ sub.s $f2, $f2, $f2
+ # CHECK: :[[@LINE-1]]:3: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/set-softfloat-hardfloat.s b/test/MC/Mips/set-softfloat-hardfloat.s
new file mode 100644
index 0000000000000..ffb10f3ec5804
--- /dev/null
+++ b/test/MC/Mips/set-softfloat-hardfloat.s
@@ -0,0 +1,12 @@
+# RUN: llvm-mc %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+soft-float | \
+# RUN: FileCheck %s
+
+ .set hardfloat
+ add.s $f2, $f2, $f2
+ sub.s $f2, $f2, $f2
+ .set softfloat
+
+# CHECK: .set hardfloat
+# CHECK: add.s $f2, $f2, $f2
+# CHECK: sub.s $f2, $f2, $f2
+# CHECK: .set softfloat
diff --git a/test/MC/PowerPC/st-other-crash.s b/test/MC/PowerPC/st-other-crash.s
new file mode 100644
index 0000000000000..fcc56ad70c56e
--- /dev/null
+++ b/test/MC/PowerPC/st-other-crash.s
@@ -0,0 +1,21 @@
+// RUN: llvm-mc < %s -filetype=obj -triple powerpc64le-pc-linux | \
+// RUN: llvm-readobj -t | FileCheck %s
+
+// This used to crash. Make sure it produces the correct symbol.
+
+// CHECK: Symbol {
+// CHECK: Name: _ZN4llvm11SmallVectorIcLj0EEC2Ev (12)
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: None (0x0)
+// CHECK-NEXT: Other: 64
+// CHECK-NEXT: Section: .group (0x5)
+// CHECK-NEXT: }
+
+
+ .section .text._ZN4llvm11SmallVectorIcLj0EEC2Ev,"axG",@progbits,_ZN4llvm11SmallVectorIcLj0EEC2Ev,comdat
+.Ltmp2:
+ addis 2, 12, .TOC.-.Ltmp2@ha
+.Ltmp3:
+ .localentry _ZN4llvm11SmallVectorIcLj0EEC2Ev, .Ltmp3-.Ltmp2
diff --git a/test/MC/PowerPC/vsx.s b/test/MC/PowerPC/vsx.s
index 773fc9eef6d16..352fc51738000 100644
--- a/test/MC/PowerPC/vsx.s
+++ b/test/MC/PowerPC/vsx.s
@@ -95,6 +95,12 @@
# CHECK-BE: xsmaddmdp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x4c]
# CHECK-LE: xsmaddmdp 7, 63, 27 # encoding: [0x4c,0xd9,0xff,0xf0]
xsmaddmdp 7, 63, 27
+# CHECK-BE: xsmaddasp 7, 63, 27 # encoding: [0xf0,0xff,0xd8,0x0c]
+# CHECK-LE: xsmaddasp 7, 63, 27 # encoding: [0x0c,0xd8,0xff,0xf0]
+ xsmaddasp 7, 63, 27
+# CHECK-BE: xsmaddmsp 7, 63, 27 # encoding: [0xf0,0xff,0xd8,0x4c]
+# CHECK-LE: xsmaddmsp 7, 63, 27 # encoding: [0x4c,0xd8,0xff,0xf0]
+ xsmaddmsp 7, 63, 27
# CHECK-BE: xsmaxdp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x04]
# CHECK-LE: xsmaxdp 7, 63, 27 # encoding: [0x04,0xdd,0xff,0xf0]
xsmaxdp 7, 63, 27
@@ -107,6 +113,12 @@
# CHECK-BE: xsmsubmdp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0xcc]
# CHECK-LE: xsmsubmdp 7, 63, 27 # encoding: [0xcc,0xd9,0xff,0xf0]
xsmsubmdp 7, 63, 27
+# CHECK-BE: xsmsubasp 7, 63, 27 # encoding: [0xf0,0xff,0xd8,0x8c]
+# CHECK-LE: xsmsubasp 7, 63, 27 # encoding: [0x8c,0xd8,0xff,0xf0]
+ xsmsubasp 7, 63, 27
+# CHECK-BE: xsmsubmsp 7, 63, 27 # encoding: [0xf0,0xff,0xd8,0xcc]
+# CHECK-LE: xsmsubmsp 7, 63, 27 # encoding: [0xcc,0xd8,0xff,0xf0]
+ xsmsubmsp 7, 63, 27
# CHECK-BE: xsmulsp 7, 63, 27 # encoding: [0xf0,0xff,0xd8,0x84]
# CHECK-LE: xsmulsp 7, 63, 27 # encoding: [0x84,0xd8,0xff,0xf0]
xsmulsp 7, 63, 27
@@ -131,6 +143,18 @@
# CHECK-BE: xsnmsubmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0xcc]
# CHECK-LE: xsnmsubmdp 7, 63, 27 # encoding: [0xcc,0xdd,0xff,0xf0]
xsnmsubmdp 7, 63, 27
+# CHECK-BE: xsnmaddasp 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0x0c]
+# CHECK-LE: xsnmaddasp 7, 63, 27 # encoding: [0x0c,0xdc,0xff,0xf0]
+ xsnmaddasp 7, 63, 27
+# CHECK-BE: xsnmaddmsp 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0x4c]
+# CHECK-LE: xsnmaddmsp 7, 63, 27 # encoding: [0x4c,0xdc,0xff,0xf0]
+ xsnmaddmsp 7, 63, 27
+# CHECK-BE: xsnmsubasp 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0x8c]
+# CHECK-LE: xsnmsubasp 7, 63, 27 # encoding: [0x8c,0xdc,0xff,0xf0]
+ xsnmsubasp 7, 63, 27
+# CHECK-BE: xsnmsubmsp 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0xcc]
+# CHECK-LE: xsnmsubmsp 7, 63, 27 # encoding: [0xcc,0xdc,0xff,0xf0]
+ xsnmsubmsp 7, 63, 27
# CHECK-BE: xsrdpi 7, 27 # encoding: [0xf0,0xe0,0xd9,0x24]
# CHECK-LE: xsrdpi 7, 27 # encoding: [0x24,0xd9,0xe0,0xf0]
xsrdpi 7, 27
diff --git a/test/MC/X86/avx512-encodings.s b/test/MC/X86/avx512-encodings.s
index cb71a00381b13..ca0fccb2e3efa 100644
--- a/test/MC/X86/avx512-encodings.s
+++ b/test/MC/X86/avx512-encodings.s
@@ -4796,6 +4796,38 @@
// CHECK: encoding: [0x62,0x61,0x7c,0x58,0x51,0xa2,0xfc,0xfd,0xff,0xff]
vsqrtps -516(%rdx){1to16}, %zmm28
+// CHECK: vsqrtpd {rn-sae}, %zmm19, %zmm19
+// CHECK: encoding: [0x62,0xa1,0xfd,0x18,0x51,0xdb]
+ vsqrtpd {rn-sae}, %zmm19, %zmm19
+
+// CHECK: vsqrtpd {ru-sae}, %zmm19, %zmm19
+// CHECK: encoding: [0x62,0xa1,0xfd,0x58,0x51,0xdb]
+ vsqrtpd {ru-sae}, %zmm19, %zmm19
+
+// CHECK: vsqrtpd {rd-sae}, %zmm19, %zmm19
+// CHECK: encoding: [0x62,0xa1,0xfd,0x38,0x51,0xdb]
+ vsqrtpd {rd-sae}, %zmm19, %zmm19
+
+// CHECK: vsqrtpd {rz-sae}, %zmm19, %zmm19
+// CHECK: encoding: [0x62,0xa1,0xfd,0x78,0x51,0xdb]
+ vsqrtpd {rz-sae}, %zmm19, %zmm19
+
+// CHECK: vsqrtps {rn-sae}, %zmm29, %zmm28
+// CHECK: encoding: [0x62,0x01,0x7c,0x18,0x51,0xe5]
+ vsqrtps {rn-sae}, %zmm29, %zmm28
+
+// CHECK: vsqrtps {ru-sae}, %zmm29, %zmm28
+// CHECK: encoding: [0x62,0x01,0x7c,0x58,0x51,0xe5]
+ vsqrtps {ru-sae}, %zmm29, %zmm28
+
+// CHECK: vsqrtps {rd-sae}, %zmm29, %zmm28
+// CHECK: encoding: [0x62,0x01,0x7c,0x38,0x51,0xe5]
+ vsqrtps {rd-sae}, %zmm29, %zmm28
+
+// CHECK: vsqrtps {rz-sae}, %zmm29, %zmm28
+// CHECK: encoding: [0x62,0x01,0x7c,0x78,0x51,0xe5]
+ vsqrtps {rz-sae}, %zmm29, %zmm28
+
// CHECK: vsubpd %zmm9, %zmm12, %zmm9
// CHECK: encoding: [0x62,0x51,0x9d,0x48,0x5c,0xc9]
vsubpd %zmm9, %zmm12, %zmm9
@@ -6052,6 +6084,66 @@ valignq $2, 0x100(%rsp), %zmm0, %zmm1
// CHECK: encoding: [0x62,0xf3,0xfd,0x49,0x03,0xcb,0x03]
valignq $3, %zmm3, %zmm0, %zmm1 {%k1}
+// CHECK: valignq $171, %zmm23, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x23,0xdd,0x48,0x03,0xe7,0xab]
+ valignq $0xab, %zmm23, %zmm4, %zmm28
+
+// CHECK: valignq $171, %zmm23, %zmm4, %zmm28 {%k3}
+// CHECK: encoding: [0x62,0x23,0xdd,0x4b,0x03,0xe7,0xab]
+ valignq $0xab, %zmm23, %zmm4, %zmm28 {%k3}
+
+// CHECK: valignq $171, %zmm23, %zmm4, %zmm28 {%k3} {z}
+// CHECK: encoding: [0x62,0x23,0xdd,0xcb,0x03,0xe7,0xab]
+ valignq $0xab, %zmm23, %zmm4, %zmm28 {%k3} {z}
+
+// CHECK: valignq $123, %zmm23, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x23,0xdd,0x48,0x03,0xe7,0x7b]
+ valignq $0x7b, %zmm23, %zmm4, %zmm28
+
+// CHECK: valignq $123, (%rcx), %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x48,0x03,0x21,0x7b]
+ valignq $0x7b, (%rcx), %zmm4, %zmm28
+
+// CHECK: valignq $123, 291(%rax,%r14,8), %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x23,0xdd,0x48,0x03,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ valignq $0x7b, 291(%rax,%r14,8), %zmm4, %zmm28
+
+// CHECK: valignq $123, (%rcx){1to8}, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x58,0x03,0x21,0x7b]
+ valignq $0x7b, (%rcx){1to8}, %zmm4, %zmm28
+
+// CHECK: valignq $123, 8128(%rdx), %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x48,0x03,0x62,0x7f,0x7b]
+ valignq $0x7b, 8128(%rdx), %zmm4, %zmm28
+
+// CHECK: valignq $123, 8192(%rdx), %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x48,0x03,0xa2,0x00,0x20,0x00,0x00,0x7b]
+ valignq $0x7b, 8192(%rdx), %zmm4, %zmm28
+
+// CHECK: valignq $123, -8192(%rdx), %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x48,0x03,0x62,0x80,0x7b]
+ valignq $0x7b, -8192(%rdx), %zmm4, %zmm28
+
+// CHECK: valignq $123, -8256(%rdx), %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x48,0x03,0xa2,0xc0,0xdf,0xff,0xff,0x7b]
+ valignq $0x7b, -8256(%rdx), %zmm4, %zmm28
+
+// CHECK: valignq $123, 1016(%rdx){1to8}, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x58,0x03,0x62,0x7f,0x7b]
+ valignq $0x7b, 1016(%rdx){1to8}, %zmm4, %zmm28
+
+// CHECK: valignq $123, 1024(%rdx){1to8}, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x58,0x03,0xa2,0x00,0x04,0x00,0x00,0x7b]
+ valignq $0x7b, 1024(%rdx){1to8}, %zmm4, %zmm28
+
+// CHECK: valignq $123, -1024(%rdx){1to8}, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x58,0x03,0x62,0x80,0x7b]
+ valignq $0x7b, -1024(%rdx){1to8}, %zmm4, %zmm28
+
+// CHECK: valignq $123, -1032(%rdx){1to8}, %zmm4, %zmm28
+// CHECK: encoding: [0x62,0x63,0xdd,0x58,0x03,0xa2,0xf8,0xfb,0xff,0xff,0x7b]
+ valignq $0x7b, -1032(%rdx){1to8}, %zmm4, %zmm28
+
// CHECK: vextractf32x4 $3
// CHECK: encoding: [0x62,0xf3,0x7d,0x49,0x19,0xd9,0x03]
vextractf32x4 $3, %zmm3, %xmm1 {%k1}
@@ -7724,3 +7816,1000 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
// CHECK: encoding: [0x62,0xf1,0x95,0x50,0x72,0xa2,0xf8,0xfb,0xff,0xff,0x7b]
vpsraq $123, -1032(%rdx){1to8}, %zmm29
+// CHECK: vfixupimmps $171, %zmm2, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0xfa,0xab]
+ vfixupimmps $0xab, %zmm2, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $171, %zmm2, %zmm26, %zmm15 {%k4}
+// CHECK: encoding: [0x62,0x73,0x2d,0x44,0x54,0xfa,0xab]
+ vfixupimmps $0xab, %zmm2, %zmm26, %zmm15 {%k4}
+
+// CHECK: vfixupimmps $171, %zmm2, %zmm26, %zmm15 {%k4} {z}
+// CHECK: encoding: [0x62,0x73,0x2d,0xc4,0x54,0xfa,0xab]
+ vfixupimmps $0xab, %zmm2, %zmm26, %zmm15 {%k4} {z}
+
+// CHECK: vfixupimmps $171,{sae}, %zmm2, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x10,0x54,0xfa,0xab]
+ vfixupimmps $0xab,{sae}, %zmm2, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, %zmm2, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0xfa,0x7b]
+ vfixupimmps $0x7b, %zmm2, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123,{sae}, %zmm2, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x10,0x54,0xfa,0x7b]
+ vfixupimmps $0x7b,{sae}, %zmm2, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, (%rcx), %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0x39,0x7b]
+ vfixupimmps $0x7b, (%rcx), %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, 291(%rax,%r14,8), %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x33,0x2d,0x40,0x54,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 291(%rax,%r14,8), %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, (%rcx){1to16}, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x50,0x54,0x39,0x7b]
+ vfixupimmps $0x7b, (%rcx){1to16}, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, 8128(%rdx), %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0x7a,0x7f,0x7b]
+ vfixupimmps $0x7b, 8128(%rdx), %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, 8192(%rdx), %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0xba,0x00,0x20,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 8192(%rdx), %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, -8192(%rdx), %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0x7a,0x80,0x7b]
+ vfixupimmps $0x7b, -8192(%rdx), %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, -8256(%rdx), %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x40,0x54,0xba,0xc0,0xdf,0xff,0xff,0x7b]
+ vfixupimmps $0x7b, -8256(%rdx), %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, 508(%rdx){1to16}, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x50,0x54,0x7a,0x7f,0x7b]
+ vfixupimmps $0x7b, 508(%rdx){1to16}, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, 512(%rdx){1to16}, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x50,0x54,0xba,0x00,0x02,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 512(%rdx){1to16}, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, -512(%rdx){1to16}, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x50,0x54,0x7a,0x80,0x7b]
+ vfixupimmps $0x7b, -512(%rdx){1to16}, %zmm26, %zmm15
+
+// CHECK: vfixupimmps $123, -516(%rdx){1to16}, %zmm26, %zmm15
+// CHECK: encoding: [0x62,0x73,0x2d,0x50,0x54,0xba,0xfc,0xfd,0xff,0xff,0x7b]
+ vfixupimmps $0x7b, -516(%rdx){1to16}, %zmm26, %zmm15
+
+// CHECK: vfixupimmpd $171, %zmm19, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x33,0xd5,0x40,0x54,0xcb,0xab]
+ vfixupimmpd $0xab, %zmm19, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $171, %zmm19, %zmm21, %zmm9 {%k2}
+// CHECK: encoding: [0x62,0x33,0xd5,0x42,0x54,0xcb,0xab]
+ vfixupimmpd $0xab, %zmm19, %zmm21, %zmm9 {%k2}
+
+// CHECK: vfixupimmpd $171, %zmm19, %zmm21, %zmm9 {%k2} {z}
+// CHECK: encoding: [0x62,0x33,0xd5,0xc2,0x54,0xcb,0xab]
+ vfixupimmpd $0xab, %zmm19, %zmm21, %zmm9 {%k2} {z}
+
+// CHECK: vfixupimmpd $171,{sae}, %zmm19, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x33,0xd5,0x10,0x54,0xcb,0xab]
+ vfixupimmpd $0xab,{sae}, %zmm19, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, %zmm19, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x33,0xd5,0x40,0x54,0xcb,0x7b]
+ vfixupimmpd $0x7b, %zmm19, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123,{sae}, %zmm19, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x33,0xd5,0x10,0x54,0xcb,0x7b]
+ vfixupimmpd $0x7b,{sae}, %zmm19, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, (%rcx), %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x40,0x54,0x09,0x7b]
+ vfixupimmpd $0x7b, (%rcx), %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, 291(%rax,%r14,8), %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x33,0xd5,0x40,0x54,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 291(%rax,%r14,8), %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, (%rcx){1to8}, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x50,0x54,0x09,0x7b]
+ vfixupimmpd $0x7b, (%rcx){1to8}, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, 8128(%rdx), %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x40,0x54,0x4a,0x7f,0x7b]
+ vfixupimmpd $0x7b, 8128(%rdx), %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, 8192(%rdx), %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x40,0x54,0x8a,0x00,0x20,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 8192(%rdx), %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, -8192(%rdx), %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x40,0x54,0x4a,0x80,0x7b]
+ vfixupimmpd $0x7b, -8192(%rdx), %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, -8256(%rdx), %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x40,0x54,0x8a,0xc0,0xdf,0xff,0xff,0x7b]
+ vfixupimmpd $0x7b, -8256(%rdx), %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, 1016(%rdx){1to8}, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x50,0x54,0x4a,0x7f,0x7b]
+ vfixupimmpd $0x7b, 1016(%rdx){1to8}, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, 1024(%rdx){1to8}, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x50,0x54,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 1024(%rdx){1to8}, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, -1024(%rdx){1to8}, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x50,0x54,0x4a,0x80,0x7b]
+ vfixupimmpd $0x7b, -1024(%rdx){1to8}, %zmm21, %zmm9
+
+// CHECK: vfixupimmpd $123, -1032(%rdx){1to8}, %zmm21, %zmm9
+// CHECK: encoding: [0x62,0x73,0xd5,0x50,0x54,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vfixupimmpd $0x7b, -1032(%rdx){1to8}, %zmm21, %zmm9
+
+// CHECK: vfixupimmss $171, %xmm28, %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x13,0x6d,0x00,0x55,0xfc,0xab]
+ vfixupimmss $0xab, %xmm28, %xmm18, %xmm15
+
+// CHECK: vfixupimmss $171, %xmm28, %xmm18, %xmm15 {%k5}
+// CHECK: encoding: [0x62,0x13,0x6d,0x05,0x55,0xfc,0xab]
+ vfixupimmss $0xab, %xmm28, %xmm18, %xmm15 {%k5}
+
+// CHECK: vfixupimmss $171, %xmm28, %xmm18, %xmm15 {%k5} {z}
+// CHECK: encoding: [0x62,0x13,0x6d,0x85,0x55,0xfc,0xab]
+ vfixupimmss $0xab, %xmm28, %xmm18, %xmm15 {%k5} {z}
+
+// CHECK: vfixupimmss $171,{sae}, %xmm28, %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x13,0x6d,0x10,0x55,0xfc,0xab]
+ vfixupimmss $0xab,{sae}, %xmm28, %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, %xmm28, %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x13,0x6d,0x00,0x55,0xfc,0x7b]
+ vfixupimmss $0x7b, %xmm28, %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123,{sae}, %xmm28, %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x13,0x6d,0x10,0x55,0xfc,0x7b]
+ vfixupimmss $0x7b,{sae}, %xmm28, %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, (%rcx), %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0x39,0x7b]
+ vfixupimmss $0x7b, (%rcx), %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, 291(%rax,%r14,8), %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x33,0x6d,0x00,0x55,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmss $0x7b, 291(%rax,%r14,8), %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, 508(%rdx), %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0x7a,0x7f,0x7b]
+ vfixupimmss $0x7b, 508(%rdx), %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, 512(%rdx), %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0xba,0x00,0x02,0x00,0x00,0x7b]
+ vfixupimmss $0x7b, 512(%rdx), %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, -512(%rdx), %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0x7a,0x80,0x7b]
+ vfixupimmss $0x7b, -512(%rdx), %xmm18, %xmm15
+
+// CHECK: vfixupimmss $123, -516(%rdx), %xmm18, %xmm15
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0xba,0xfc,0xfd,0xff,0xff,0x7b]
+ vfixupimmss $0x7b, -516(%rdx), %xmm18, %xmm15
+
+// CHECK: vfixupimmsd $171, %xmm5, %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xed,0xab]
+ vfixupimmsd $0xab, %xmm5, %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $171, %xmm5, %xmm26, %xmm13 {%k6}
+// CHECK: encoding: [0x62,0x73,0xad,0x06,0x55,0xed,0xab]
+ vfixupimmsd $0xab, %xmm5, %xmm26, %xmm13 {%k6}
+
+// CHECK: vfixupimmsd $171, %xmm5, %xmm26, %xmm13 {%k6} {z}
+// CHECK: encoding: [0x62,0x73,0xad,0x86,0x55,0xed,0xab]
+ vfixupimmsd $0xab, %xmm5, %xmm26, %xmm13 {%k6} {z}
+
+// CHECK: vfixupimmsd $171,{sae}, %xmm5, %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x10,0x55,0xed,0xab]
+ vfixupimmsd $0xab,{sae}, %xmm5, %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, %xmm5, %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xed,0x7b]
+ vfixupimmsd $0x7b, %xmm5, %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123,{sae}, %xmm5, %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x10,0x55,0xed,0x7b]
+ vfixupimmsd $0x7b,{sae}, %xmm5, %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, (%rcx), %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0x29,0x7b]
+ vfixupimmsd $0x7b, (%rcx), %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, 291(%rax,%r14,8), %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x33,0xad,0x00,0x55,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmsd $0x7b, 291(%rax,%r14,8), %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, 1016(%rdx), %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0x6a,0x7f,0x7b]
+ vfixupimmsd $0x7b, 1016(%rdx), %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, 1024(%rdx), %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xaa,0x00,0x04,0x00,0x00,0x7b]
+ vfixupimmsd $0x7b, 1024(%rdx), %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, -1024(%rdx), %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0x6a,0x80,0x7b]
+ vfixupimmsd $0x7b, -1024(%rdx), %xmm26, %xmm13
+
+// CHECK: vfixupimmsd $123, -1032(%rdx), %xmm26, %xmm13
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xaa,0xf8,0xfb,0xff,0xff,0x7b]
+ vfixupimmsd $0x7b, -1032(%rdx), %xmm26, %xmm13
+
+// CHECK: vpshufd $171, %zmm25, %zmm19
+// CHECK: encoding: [0x62,0x81,0x7d,0x48,0x70,0xd9,0xab]
+ vpshufd $171, %zmm25, %zmm19
+
+// CHECK: vpshufd $171, %zmm25, %zmm19 {%k6}
+// CHECK: encoding: [0x62,0x81,0x7d,0x4e,0x70,0xd9,0xab]
+ vpshufd $171, %zmm25, %zmm19 {%k6}
+
+// CHECK: vpshufd $171, %zmm25, %zmm19 {%k6} {z}
+// CHECK: encoding: [0x62,0x81,0x7d,0xce,0x70,0xd9,0xab]
+ vpshufd $171, %zmm25, %zmm19 {%k6} {z}
+
+// CHECK: vpshufd $123, %zmm25, %zmm19
+// CHECK: encoding: [0x62,0x81,0x7d,0x48,0x70,0xd9,0x7b]
+ vpshufd $123, %zmm25, %zmm19
+
+// CHECK: vpshufd $123, (%rcx), %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x48,0x70,0x19,0x7b]
+ vpshufd $123, (%rcx), %zmm19
+
+// CHECK: vpshufd $123, 291(%rax,%r14,8), %zmm19
+// CHECK: encoding: [0x62,0xa1,0x7d,0x48,0x70,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshufd $123, 291(%rax,%r14,8), %zmm19
+
+// CHECK: vpshufd $123, (%rcx){1to16}, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x58,0x70,0x19,0x7b]
+ vpshufd $123, (%rcx){1to16}, %zmm19
+
+// CHECK: vpshufd $123, 8128(%rdx), %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x48,0x70,0x5a,0x7f,0x7b]
+ vpshufd $123, 8128(%rdx), %zmm19
+
+// CHECK: vpshufd $123, 8192(%rdx), %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x48,0x70,0x9a,0x00,0x20,0x00,0x00,0x7b]
+ vpshufd $123, 8192(%rdx), %zmm19
+
+// CHECK: vpshufd $123, -8192(%rdx), %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x48,0x70,0x5a,0x80,0x7b]
+ vpshufd $123, -8192(%rdx), %zmm19
+
+// CHECK: vpshufd $123, -8256(%rdx), %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x48,0x70,0x9a,0xc0,0xdf,0xff,0xff,0x7b]
+ vpshufd $123, -8256(%rdx), %zmm19
+
+// CHECK: vpshufd $123, 508(%rdx){1to16}, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x58,0x70,0x5a,0x7f,0x7b]
+ vpshufd $123, 508(%rdx){1to16}, %zmm19
+
+// CHECK: vpshufd $123, 512(%rdx){1to16}, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x58,0x70,0x9a,0x00,0x02,0x00,0x00,0x7b]
+ vpshufd $123, 512(%rdx){1to16}, %zmm19
+
+// CHECK: vpshufd $123, -512(%rdx){1to16}, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x58,0x70,0x5a,0x80,0x7b]
+ vpshufd $123, -512(%rdx){1to16}, %zmm19
+
+// CHECK: vpshufd $123, -516(%rdx){1to16}, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x7d,0x58,0x70,0x9a,0xfc,0xfd,0xff,0xff,0x7b]
+ vpshufd $123, -516(%rdx){1to16}, %zmm19
+
+// CHECK: vgetexppd %zmm25, %zmm14
+// CHECK: encoding: [0x62,0x12,0xfd,0x48,0x42,0xf1]
+ vgetexppd %zmm25, %zmm14
+
+// CHECK: vgetexppd %zmm25, %zmm14 {%k5}
+// CHECK: encoding: [0x62,0x12,0xfd,0x4d,0x42,0xf1]
+ vgetexppd %zmm25, %zmm14 {%k5}
+
+// CHECK: vgetexppd %zmm25, %zmm14 {%k5} {z}
+// CHECK: encoding: [0x62,0x12,0xfd,0xcd,0x42,0xf1]
+ vgetexppd %zmm25, %zmm14 {%k5} {z}
+
+// CHECK: vgetexppd {sae}, %zmm25, %zmm14
+// CHECK: encoding: [0x62,0x12,0xfd,0x18,0x42,0xf1]
+ vgetexppd {sae}, %zmm25, %zmm14
+
+// CHECK: vgetexppd (%rcx), %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x48,0x42,0x31]
+ vgetexppd (%rcx), %zmm14
+
+// CHECK: vgetexppd 291(%rax,%r14,8), %zmm14
+// CHECK: encoding: [0x62,0x32,0xfd,0x48,0x42,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vgetexppd 291(%rax,%r14,8), %zmm14
+
+// CHECK: vgetexppd (%rcx){1to8}, %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x58,0x42,0x31]
+ vgetexppd (%rcx){1to8}, %zmm14
+
+// CHECK: vgetexppd 8128(%rdx), %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x48,0x42,0x72,0x7f]
+ vgetexppd 8128(%rdx), %zmm14
+
+// CHECK: vgetexppd 8192(%rdx), %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x48,0x42,0xb2,0x00,0x20,0x00,0x00]
+ vgetexppd 8192(%rdx), %zmm14
+
+// CHECK: vgetexppd -8192(%rdx), %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x48,0x42,0x72,0x80]
+ vgetexppd -8192(%rdx), %zmm14
+
+// CHECK: vgetexppd -8256(%rdx), %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x48,0x42,0xb2,0xc0,0xdf,0xff,0xff]
+ vgetexppd -8256(%rdx), %zmm14
+
+// CHECK: vgetexppd 1016(%rdx){1to8}, %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x58,0x42,0x72,0x7f]
+ vgetexppd 1016(%rdx){1to8}, %zmm14
+
+// CHECK: vgetexppd 1024(%rdx){1to8}, %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x58,0x42,0xb2,0x00,0x04,0x00,0x00]
+ vgetexppd 1024(%rdx){1to8}, %zmm14
+
+// CHECK: vgetexppd -1024(%rdx){1to8}, %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x58,0x42,0x72,0x80]
+ vgetexppd -1024(%rdx){1to8}, %zmm14
+
+// CHECK: vgetexppd -1032(%rdx){1to8}, %zmm14
+// CHECK: encoding: [0x62,0x72,0xfd,0x58,0x42,0xb2,0xf8,0xfb,0xff,0xff]
+ vgetexppd -1032(%rdx){1to8}, %zmm14
+
+// CHECK: vgetexpps %zmm6, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x42,0xce]
+ vgetexpps %zmm6, %zmm1
+
+// CHECK: vgetexpps %zmm6, %zmm1 {%k3}
+// CHECK: encoding: [0x62,0xf2,0x7d,0x4b,0x42,0xce]
+ vgetexpps %zmm6, %zmm1 {%k3}
+
+// CHECK: vgetexpps %zmm6, %zmm1 {%k3} {z}
+// CHECK: encoding: [0x62,0xf2,0x7d,0xcb,0x42,0xce]
+ vgetexpps %zmm6, %zmm1 {%k3} {z}
+
+// CHECK: vgetexpps {sae}, %zmm6, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x18,0x42,0xce]
+ vgetexpps {sae}, %zmm6, %zmm1
+
+// CHECK: vgetexpps (%rcx), %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x42,0x09]
+ vgetexpps (%rcx), %zmm1
+
+// CHECK: vgetexpps 291(%rax,%r14,8), %zmm1
+// CHECK: encoding: [0x62,0xb2,0x7d,0x48,0x42,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vgetexpps 291(%rax,%r14,8), %zmm1
+
+// CHECK: vgetexpps (%rcx){1to16}, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x58,0x42,0x09]
+ vgetexpps (%rcx){1to16}, %zmm1
+
+// CHECK: vgetexpps 8128(%rdx), %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x42,0x4a,0x7f]
+ vgetexpps 8128(%rdx), %zmm1
+
+// CHECK: vgetexpps 8192(%rdx), %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x42,0x8a,0x00,0x20,0x00,0x00]
+ vgetexpps 8192(%rdx), %zmm1
+
+// CHECK: vgetexpps -8192(%rdx), %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x42,0x4a,0x80]
+ vgetexpps -8192(%rdx), %zmm1
+
+// CHECK: vgetexpps -8256(%rdx), %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x42,0x8a,0xc0,0xdf,0xff,0xff]
+ vgetexpps -8256(%rdx), %zmm1
+
+// CHECK: vgetexpps 508(%rdx){1to16}, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x58,0x42,0x4a,0x7f]
+ vgetexpps 508(%rdx){1to16}, %zmm1
+
+// CHECK: vgetexpps 512(%rdx){1to16}, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x58,0x42,0x8a,0x00,0x02,0x00,0x00]
+ vgetexpps 512(%rdx){1to16}, %zmm1
+
+// CHECK: vgetexpps -512(%rdx){1to16}, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x58,0x42,0x4a,0x80]
+ vgetexpps -512(%rdx){1to16}, %zmm1
+
+// CHECK: vgetexpps -516(%rdx){1to16}, %zmm1
+// CHECK: encoding: [0x62,0xf2,0x7d,0x58,0x42,0x8a,0xfc,0xfd,0xff,0xff]
+ vgetexpps -516(%rdx){1to16}, %zmm1
+
+// CHECK: vshuff32x4 $171, %zmm3, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0xf3,0xab]
+ vshuff32x4 $171, %zmm3, %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $171, %zmm3, %zmm24, %zmm6 {%k2}
+// CHECK: encoding: [0x62,0xf3,0x3d,0x42,0x23,0xf3,0xab]
+ vshuff32x4 $171, %zmm3, %zmm24, %zmm6 {%k2}
+
+// CHECK: vshuff32x4 $171, %zmm3, %zmm24, %zmm6 {%k2} {z}
+// CHECK: encoding: [0x62,0xf3,0x3d,0xc2,0x23,0xf3,0xab]
+ vshuff32x4 $171, %zmm3, %zmm24, %zmm6 {%k2} {z}
+
+// CHECK: vshuff32x4 $123, %zmm3, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0xf3,0x7b]
+ vshuff32x4 $123, %zmm3, %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, (%rcx), %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0x31,0x7b]
+ vshuff32x4 $123, (%rcx), %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, 291(%rax,%r14,8), %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xb3,0x3d,0x40,0x23,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshuff32x4 $123, 291(%rax,%r14,8), %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, (%rcx){1to16}, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x50,0x23,0x31,0x7b]
+ vshuff32x4 $123, (%rcx){1to16}, %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, 8128(%rdx), %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0x72,0x7f,0x7b]
+ vshuff32x4 $123, 8128(%rdx), %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, 8192(%rdx), %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0xb2,0x00,0x20,0x00,0x00,0x7b]
+ vshuff32x4 $123, 8192(%rdx), %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, -8192(%rdx), %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0x72,0x80,0x7b]
+ vshuff32x4 $123, -8192(%rdx), %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, -8256(%rdx), %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x40,0x23,0xb2,0xc0,0xdf,0xff,0xff,0x7b]
+ vshuff32x4 $123, -8256(%rdx), %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, 508(%rdx){1to16}, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x50,0x23,0x72,0x7f,0x7b]
+ vshuff32x4 $123, 508(%rdx){1to16}, %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, 512(%rdx){1to16}, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x50,0x23,0xb2,0x00,0x02,0x00,0x00,0x7b]
+ vshuff32x4 $123, 512(%rdx){1to16}, %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, -512(%rdx){1to16}, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x50,0x23,0x72,0x80,0x7b]
+ vshuff32x4 $123, -512(%rdx){1to16}, %zmm24, %zmm6
+
+// CHECK: vshuff32x4 $123, -516(%rdx){1to16}, %zmm24, %zmm6
+// CHECK: encoding: [0x62,0xf3,0x3d,0x50,0x23,0xb2,0xfc,0xfd,0xff,0xff,0x7b]
+ vshuff32x4 $123, -516(%rdx){1to16}, %zmm24, %zmm6
+
+// CHECK: vshuff64x2 $171, %zmm11, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x53,0xb5,0x40,0x23,0xfb,0xab]
+ vshuff64x2 $171, %zmm11, %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $171, %zmm11, %zmm25, %zmm15 {%k2}
+// CHECK: encoding: [0x62,0x53,0xb5,0x42,0x23,0xfb,0xab]
+ vshuff64x2 $171, %zmm11, %zmm25, %zmm15 {%k2}
+
+// CHECK: vshuff64x2 $171, %zmm11, %zmm25, %zmm15 {%k2} {z}
+// CHECK: encoding: [0x62,0x53,0xb5,0xc2,0x23,0xfb,0xab]
+ vshuff64x2 $171, %zmm11, %zmm25, %zmm15 {%k2} {z}
+
+// CHECK: vshuff64x2 $123, %zmm11, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x53,0xb5,0x40,0x23,0xfb,0x7b]
+ vshuff64x2 $123, %zmm11, %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, (%rcx), %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x40,0x23,0x39,0x7b]
+ vshuff64x2 $123, (%rcx), %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, 291(%rax,%r14,8), %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x33,0xb5,0x40,0x23,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshuff64x2 $123, 291(%rax,%r14,8), %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, (%rcx){1to8}, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x50,0x23,0x39,0x7b]
+ vshuff64x2 $123, (%rcx){1to8}, %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, 8128(%rdx), %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x40,0x23,0x7a,0x7f,0x7b]
+ vshuff64x2 $123, 8128(%rdx), %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, 8192(%rdx), %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x40,0x23,0xba,0x00,0x20,0x00,0x00,0x7b]
+ vshuff64x2 $123, 8192(%rdx), %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, -8192(%rdx), %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x40,0x23,0x7a,0x80,0x7b]
+ vshuff64x2 $123, -8192(%rdx), %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, -8256(%rdx), %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x40,0x23,0xba,0xc0,0xdf,0xff,0xff,0x7b]
+ vshuff64x2 $123, -8256(%rdx), %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, 1016(%rdx){1to8}, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x50,0x23,0x7a,0x7f,0x7b]
+ vshuff64x2 $123, 1016(%rdx){1to8}, %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, 1024(%rdx){1to8}, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x50,0x23,0xba,0x00,0x04,0x00,0x00,0x7b]
+ vshuff64x2 $123, 1024(%rdx){1to8}, %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, -1024(%rdx){1to8}, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x50,0x23,0x7a,0x80,0x7b]
+ vshuff64x2 $123, -1024(%rdx){1to8}, %zmm25, %zmm15
+
+// CHECK: vshuff64x2 $123, -1032(%rdx){1to8}, %zmm25, %zmm15
+// CHECK: encoding: [0x62,0x73,0xb5,0x50,0x23,0xba,0xf8,0xfb,0xff,0xff,0x7b]
+ vshuff64x2 $123, -1032(%rdx){1to8}, %zmm25, %zmm15
+
+// CHECK: vshufi32x4 $171, %zmm25, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0x93,0x1d,0x40,0x43,0xc9,0xab]
+ vshufi32x4 $171, %zmm25, %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $171, %zmm25, %zmm28, %zmm1 {%k4}
+// CHECK: encoding: [0x62,0x93,0x1d,0x44,0x43,0xc9,0xab]
+ vshufi32x4 $171, %zmm25, %zmm28, %zmm1 {%k4}
+
+// CHECK: vshufi32x4 $171, %zmm25, %zmm28, %zmm1 {%k4} {z}
+// CHECK: encoding: [0x62,0x93,0x1d,0xc4,0x43,0xc9,0xab]
+ vshufi32x4 $171, %zmm25, %zmm28, %zmm1 {%k4} {z}
+
+// CHECK: vshufi32x4 $123, %zmm25, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0x93,0x1d,0x40,0x43,0xc9,0x7b]
+ vshufi32x4 $123, %zmm25, %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, (%rcx), %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x40,0x43,0x09,0x7b]
+ vshufi32x4 $123, (%rcx), %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, 291(%rax,%r14,8), %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xb3,0x1d,0x40,0x43,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshufi32x4 $123, 291(%rax,%r14,8), %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, (%rcx){1to16}, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x50,0x43,0x09,0x7b]
+ vshufi32x4 $123, (%rcx){1to16}, %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, 8128(%rdx), %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x40,0x43,0x4a,0x7f,0x7b]
+ vshufi32x4 $123, 8128(%rdx), %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, 8192(%rdx), %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x40,0x43,0x8a,0x00,0x20,0x00,0x00,0x7b]
+ vshufi32x4 $123, 8192(%rdx), %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, -8192(%rdx), %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x40,0x43,0x4a,0x80,0x7b]
+ vshufi32x4 $123, -8192(%rdx), %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, -8256(%rdx), %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x40,0x43,0x8a,0xc0,0xdf,0xff,0xff,0x7b]
+ vshufi32x4 $123, -8256(%rdx), %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, 508(%rdx){1to16}, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x50,0x43,0x4a,0x7f,0x7b]
+ vshufi32x4 $123, 508(%rdx){1to16}, %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, 512(%rdx){1to16}, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x50,0x43,0x8a,0x00,0x02,0x00,0x00,0x7b]
+ vshufi32x4 $123, 512(%rdx){1to16}, %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, -512(%rdx){1to16}, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x50,0x43,0x4a,0x80,0x7b]
+ vshufi32x4 $123, -512(%rdx){1to16}, %zmm28, %zmm1
+
+// CHECK: vshufi32x4 $123, -516(%rdx){1to16}, %zmm28, %zmm1
+// CHECK: encoding: [0x62,0xf3,0x1d,0x50,0x43,0x8a,0xfc,0xfd,0xff,0xff,0x7b]
+ vshufi32x4 $123, -516(%rdx){1to16}, %zmm28, %zmm1
+
+// CHECK: vshufi64x2 $171, %zmm19, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xb3,0xfd,0x40,0x43,0xdb,0xab]
+ vshufi64x2 $171, %zmm19, %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $171, %zmm19, %zmm16, %zmm3 {%k7}
+// CHECK: encoding: [0x62,0xb3,0xfd,0x47,0x43,0xdb,0xab]
+ vshufi64x2 $171, %zmm19, %zmm16, %zmm3 {%k7}
+
+// CHECK: vshufi64x2 $171, %zmm19, %zmm16, %zmm3 {%k7} {z}
+// CHECK: encoding: [0x62,0xb3,0xfd,0xc7,0x43,0xdb,0xab]
+ vshufi64x2 $171, %zmm19, %zmm16, %zmm3 {%k7} {z}
+
+// CHECK: vshufi64x2 $123, %zmm19, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xb3,0xfd,0x40,0x43,0xdb,0x7b]
+ vshufi64x2 $123, %zmm19, %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, (%rcx), %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x40,0x43,0x19,0x7b]
+ vshufi64x2 $123, (%rcx), %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, 291(%rax,%r14,8), %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xb3,0xfd,0x40,0x43,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshufi64x2 $123, 291(%rax,%r14,8), %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, (%rcx){1to8}, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x50,0x43,0x19,0x7b]
+ vshufi64x2 $123, (%rcx){1to8}, %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, 8128(%rdx), %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x40,0x43,0x5a,0x7f,0x7b]
+ vshufi64x2 $123, 8128(%rdx), %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, 8192(%rdx), %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x40,0x43,0x9a,0x00,0x20,0x00,0x00,0x7b]
+ vshufi64x2 $123, 8192(%rdx), %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, -8192(%rdx), %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x40,0x43,0x5a,0x80,0x7b]
+ vshufi64x2 $123, -8192(%rdx), %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, -8256(%rdx), %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x40,0x43,0x9a,0xc0,0xdf,0xff,0xff,0x7b]
+ vshufi64x2 $123, -8256(%rdx), %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, 1016(%rdx){1to8}, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x50,0x43,0x5a,0x7f,0x7b]
+ vshufi64x2 $123, 1016(%rdx){1to8}, %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, 1024(%rdx){1to8}, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x50,0x43,0x9a,0x00,0x04,0x00,0x00,0x7b]
+ vshufi64x2 $123, 1024(%rdx){1to8}, %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, -1024(%rdx){1to8}, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x50,0x43,0x5a,0x80,0x7b]
+ vshufi64x2 $123, -1024(%rdx){1to8}, %zmm16, %zmm3
+
+// CHECK: vshufi64x2 $123, -1032(%rdx){1to8}, %zmm16, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x50,0x43,0x9a,0xf8,0xfb,0xff,0xff,0x7b]
+ vshufi64x2 $123, -1032(%rdx){1to8}, %zmm16, %zmm3
+
+// CHECK: vpermps %zmm24, %zmm2, %zmm4
+// CHECK: encoding: [0x62,0x92,0x6d,0x48,0x16,0xe0]
+ vpermps %zmm24, %zmm2, %zmm4
+
+// CHECK: vpermps %zmm24, %zmm2, %zmm4 {%k4}
+// CHECK: encoding: [0x62,0x92,0x6d,0x4c,0x16,0xe0]
+ vpermps %zmm24, %zmm2, %zmm4 {%k4}
+
+// CHECK: vpermps %zmm24, %zmm2, %zmm4 {%k4} {z}
+// CHECK: encoding: [0x62,0x92,0x6d,0xcc,0x16,0xe0]
+ vpermps %zmm24, %zmm2, %zmm4 {%k4} {z}
+
+// CHECK: vpermps (%rcx), %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x48,0x16,0x21]
+ vpermps (%rcx), %zmm2, %zmm4
+
+// CHECK: vpermps 291(%rax,%r14,8), %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xb2,0x6d,0x48,0x16,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpermps 291(%rax,%r14,8), %zmm2, %zmm4
+
+// CHECK: vpermps (%rcx){1to16}, %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x58,0x16,0x21]
+ vpermps (%rcx){1to16}, %zmm2, %zmm4
+
+// CHECK: vpermps 8128(%rdx), %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x48,0x16,0x62,0x7f]
+ vpermps 8128(%rdx), %zmm2, %zmm4
+
+// CHECK: vpermps 8192(%rdx), %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x48,0x16,0xa2,0x00,0x20,0x00,0x00]
+ vpermps 8192(%rdx), %zmm2, %zmm4
+
+// CHECK: vpermps -8192(%rdx), %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x48,0x16,0x62,0x80]
+ vpermps -8192(%rdx), %zmm2, %zmm4
+
+// CHECK: vpermps -8256(%rdx), %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x48,0x16,0xa2,0xc0,0xdf,0xff,0xff]
+ vpermps -8256(%rdx), %zmm2, %zmm4
+
+// CHECK: vpermps 508(%rdx){1to16}, %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x58,0x16,0x62,0x7f]
+ vpermps 508(%rdx){1to16}, %zmm2, %zmm4
+
+// CHECK: vpermps 512(%rdx){1to16}, %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x58,0x16,0xa2,0x00,0x02,0x00,0x00]
+ vpermps 512(%rdx){1to16}, %zmm2, %zmm4
+
+// CHECK: vpermps -512(%rdx){1to16}, %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x58,0x16,0x62,0x80]
+ vpermps -512(%rdx){1to16}, %zmm2, %zmm4
+
+// CHECK: vpermps -516(%rdx){1to16}, %zmm2, %zmm4
+// CHECK: encoding: [0x62,0xf2,0x6d,0x58,0x16,0xa2,0xfc,0xfd,0xff,0xff]
+ vpermps -516(%rdx){1to16}, %zmm2, %zmm4
+
+// CHECK: vpermq $171, %zmm4, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0xcc,0xab]
+ vpermq $171, %zmm4, %zmm25
+
+// CHECK: vpermq $171, %zmm4, %zmm25 {%k6}
+// CHECK: encoding: [0x62,0x63,0xfd,0x4e,0x00,0xcc,0xab]
+ vpermq $171, %zmm4, %zmm25 {%k6}
+
+// CHECK: vpermq $171, %zmm4, %zmm25 {%k6} {z}
+// CHECK: encoding: [0x62,0x63,0xfd,0xce,0x00,0xcc,0xab]
+ vpermq $171, %zmm4, %zmm25 {%k6} {z}
+
+// CHECK: vpermq $123, %zmm4, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0xcc,0x7b]
+ vpermq $123, %zmm4, %zmm25
+
+// CHECK: vpermq $123, (%rcx), %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0x09,0x7b]
+ vpermq $123, (%rcx), %zmm25
+
+// CHECK: vpermq $123, 291(%rax,%r14,8), %zmm25
+// CHECK: encoding: [0x62,0x23,0xfd,0x48,0x00,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpermq $123, 291(%rax,%r14,8), %zmm25
+
+// CHECK: vpermq $123, (%rcx){1to8}, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x58,0x00,0x09,0x7b]
+ vpermq $123, (%rcx){1to8}, %zmm25
+
+// CHECK: vpermq $123, 8128(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0x4a,0x7f,0x7b]
+ vpermq $123, 8128(%rdx), %zmm25
+
+// CHECK: vpermq $123, 8192(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0x8a,0x00,0x20,0x00,0x00,0x7b]
+ vpermq $123, 8192(%rdx), %zmm25
+
+// CHECK: vpermq $123, -8192(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0x4a,0x80,0x7b]
+ vpermq $123, -8192(%rdx), %zmm25
+
+// CHECK: vpermq $123, -8256(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x48,0x00,0x8a,0xc0,0xdf,0xff,0xff,0x7b]
+ vpermq $123, -8256(%rdx), %zmm25
+
+// CHECK: vpermq $123, 1016(%rdx){1to8}, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x58,0x00,0x4a,0x7f,0x7b]
+ vpermq $123, 1016(%rdx){1to8}, %zmm25
+
+// CHECK: vpermq $123, 1024(%rdx){1to8}, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x58,0x00,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vpermq $123, 1024(%rdx){1to8}, %zmm25
+
+// CHECK: vpermq $123, -1024(%rdx){1to8}, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x58,0x00,0x4a,0x80,0x7b]
+ vpermq $123, -1024(%rdx){1to8}, %zmm25
+
+// CHECK: vpermq $123, -1032(%rdx){1to8}, %zmm25
+// CHECK: encoding: [0x62,0x63,0xfd,0x58,0x00,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vpermq $123, -1032(%rdx){1to8}, %zmm25
+
+// CHECK: vpermq %zmm22, %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xa2,0xc5,0x40,0x36,0xee]
+ vpermq %zmm22, %zmm23, %zmm21
+
+// CHECK: vpermq %zmm22, %zmm23, %zmm21 {%k1}
+// CHECK: encoding: [0x62,0xa2,0xc5,0x41,0x36,0xee]
+ vpermq %zmm22, %zmm23, %zmm21 {%k1}
+
+// CHECK: vpermq %zmm22, %zmm23, %zmm21 {%k1} {z}
+// CHECK: encoding: [0x62,0xa2,0xc5,0xc1,0x36,0xee]
+ vpermq %zmm22, %zmm23, %zmm21 {%k1} {z}
+
+// CHECK: vpermq (%rcx), %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x40,0x36,0x29]
+ vpermq (%rcx), %zmm23, %zmm21
+
+// CHECK: vpermq 291(%rax,%r14,8), %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xa2,0xc5,0x40,0x36,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpermq 291(%rax,%r14,8), %zmm23, %zmm21
+
+// CHECK: vpermq (%rcx){1to8}, %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x50,0x36,0x29]
+ vpermq (%rcx){1to8}, %zmm23, %zmm21
+
+// CHECK: vpermq 8128(%rdx), %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x40,0x36,0x6a,0x7f]
+ vpermq 8128(%rdx), %zmm23, %zmm21
+
+// CHECK: vpermq 8192(%rdx), %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x40,0x36,0xaa,0x00,0x20,0x00,0x00]
+ vpermq 8192(%rdx), %zmm23, %zmm21
+
+// CHECK: vpermq -8192(%rdx), %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x40,0x36,0x6a,0x80]
+ vpermq -8192(%rdx), %zmm23, %zmm21
+
+// CHECK: vpermq -8256(%rdx), %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x40,0x36,0xaa,0xc0,0xdf,0xff,0xff]
+ vpermq -8256(%rdx), %zmm23, %zmm21
+
+// CHECK: vpermq 1016(%rdx){1to8}, %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x50,0x36,0x6a,0x7f]
+ vpermq 1016(%rdx){1to8}, %zmm23, %zmm21
+
+// CHECK: vpermq 1024(%rdx){1to8}, %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x50,0x36,0xaa,0x00,0x04,0x00,0x00]
+ vpermq 1024(%rdx){1to8}, %zmm23, %zmm21
+
+// CHECK: vpermq -1024(%rdx){1to8}, %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x50,0x36,0x6a,0x80]
+ vpermq -1024(%rdx){1to8}, %zmm23, %zmm21
+
+// CHECK: vpermq -1032(%rdx){1to8}, %zmm23, %zmm21
+// CHECK: encoding: [0x62,0xe2,0xc5,0x50,0x36,0xaa,0xf8,0xfb,0xff,0xff]
+ vpermq -1032(%rdx){1to8}, %zmm23, %zmm21
+
+// CHECK: vpermpd %zmm18, %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x22,0x95,0x40,0x16,0xd2]
+ vpermpd %zmm18, %zmm29, %zmm26
+
+// CHECK: vpermpd %zmm18, %zmm29, %zmm26 {%k6}
+// CHECK: encoding: [0x62,0x22,0x95,0x46,0x16,0xd2]
+ vpermpd %zmm18, %zmm29, %zmm26 {%k6}
+
+// CHECK: vpermpd %zmm18, %zmm29, %zmm26 {%k6} {z}
+// CHECK: encoding: [0x62,0x22,0x95,0xc6,0x16,0xd2]
+ vpermpd %zmm18, %zmm29, %zmm26 {%k6} {z}
+
+// CHECK: vpermpd (%rcx), %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x40,0x16,0x11]
+ vpermpd (%rcx), %zmm29, %zmm26
+
+// CHECK: vpermpd 291(%rax,%r14,8), %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x22,0x95,0x40,0x16,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vpermpd 291(%rax,%r14,8), %zmm29, %zmm26
+
+// CHECK: vpermpd (%rcx){1to8}, %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x50,0x16,0x11]
+ vpermpd (%rcx){1to8}, %zmm29, %zmm26
+
+// CHECK: vpermpd 8128(%rdx), %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x40,0x16,0x52,0x7f]
+ vpermpd 8128(%rdx), %zmm29, %zmm26
+
+// CHECK: vpermpd 8192(%rdx), %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x40,0x16,0x92,0x00,0x20,0x00,0x00]
+ vpermpd 8192(%rdx), %zmm29, %zmm26
+
+// CHECK: vpermpd -8192(%rdx), %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x40,0x16,0x52,0x80]
+ vpermpd -8192(%rdx), %zmm29, %zmm26
+
+// CHECK: vpermpd -8256(%rdx), %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x40,0x16,0x92,0xc0,0xdf,0xff,0xff]
+ vpermpd -8256(%rdx), %zmm29, %zmm26
+
+// CHECK: vpermpd 1016(%rdx){1to8}, %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x50,0x16,0x52,0x7f]
+ vpermpd 1016(%rdx){1to8}, %zmm29, %zmm26
+
+// CHECK: vpermpd 1024(%rdx){1to8}, %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x50,0x16,0x92,0x00,0x04,0x00,0x00]
+ vpermpd 1024(%rdx){1to8}, %zmm29, %zmm26
+
+// CHECK: vpermpd -1024(%rdx){1to8}, %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x50,0x16,0x52,0x80]
+ vpermpd -1024(%rdx){1to8}, %zmm29, %zmm26
+
+// CHECK: vpermpd -1032(%rdx){1to8}, %zmm29, %zmm26
+// CHECK: encoding: [0x62,0x62,0x95,0x50,0x16,0x92,0xf8,0xfb,0xff,0xff]
+ vpermpd -1032(%rdx){1to8}, %zmm29, %zmm26
+
+// CHECK: vpermpd $171, %zmm27, %zmm3
+// CHECK: encoding: [0x62,0x93,0xfd,0x48,0x01,0xdb,0xab]
+ vpermpd $0xab, %zmm27, %zmm3
+
+// CHECK: vpermpd $171, %zmm27, %zmm3 {%k2}
+// CHECK: encoding: [0x62,0x93,0xfd,0x4a,0x01,0xdb,0xab]
+ vpermpd $0xab, %zmm27, %zmm3 {%k2}
+
+// CHECK: vpermpd $171, %zmm27, %zmm3 {%k2} {z}
+// CHECK: encoding: [0x62,0x93,0xfd,0xca,0x01,0xdb,0xab]
+ vpermpd $0xab, %zmm27, %zmm3 {%k2} {z}
+
+// CHECK: vpermpd $123, %zmm27, %zmm3
+// CHECK: encoding: [0x62,0x93,0xfd,0x48,0x01,0xdb,0x7b]
+ vpermpd $0x7b, %zmm27, %zmm3
+
+// CHECK: vpermpd $123, (%rcx), %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x48,0x01,0x19,0x7b]
+ vpermpd $0x7b, (%rcx), %zmm3
+
+// CHECK: vpermpd $123, 291(%rax,%r14,8), %zmm3
+// CHECK: encoding: [0x62,0xb3,0xfd,0x48,0x01,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpermpd $0x7b, 291(%rax,%r14,8), %zmm3
+
+// CHECK: vpermpd $123, (%rcx){1to8}, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x58,0x01,0x19,0x7b]
+ vpermpd $0x7b, (%rcx){1to8}, %zmm3
+
+// CHECK: vpermpd $123, 8128(%rdx), %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x48,0x01,0x5a,0x7f,0x7b]
+ vpermpd $0x7b, 8128(%rdx), %zmm3
+
+// CHECK: vpermpd $123, 8192(%rdx), %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x48,0x01,0x9a,0x00,0x20,0x00,0x00,0x7b]
+ vpermpd $0x7b, 8192(%rdx), %zmm3
+
+// CHECK: vpermpd $123, -8192(%rdx), %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x48,0x01,0x5a,0x80,0x7b]
+ vpermpd $0x7b, -8192(%rdx), %zmm3
+
+// CHECK: vpermpd $123, -8256(%rdx), %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x48,0x01,0x9a,0xc0,0xdf,0xff,0xff,0x7b]
+ vpermpd $0x7b, -8256(%rdx), %zmm3
+
+// CHECK: vpermpd $123, 1016(%rdx){1to8}, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x58,0x01,0x5a,0x7f,0x7b]
+ vpermpd $0x7b, 1016(%rdx){1to8}, %zmm3
+
+// CHECK: vpermpd $123, 1024(%rdx){1to8}, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x58,0x01,0x9a,0x00,0x04,0x00,0x00,0x7b]
+ vpermpd $0x7b, 1024(%rdx){1to8}, %zmm3
+
+// CHECK: vpermpd $123, -1024(%rdx){1to8}, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x58,0x01,0x5a,0x80,0x7b]
+ vpermpd $0x7b, -1024(%rdx){1to8}, %zmm3
+
+// CHECK: vpermpd $123, -1032(%rdx){1to8}, %zmm3
+// CHECK: encoding: [0x62,0xf3,0xfd,0x58,0x01,0x9a,0xf8,0xfb,0xff,0xff,0x7b]
+ vpermpd $0x7b, -1032(%rdx){1to8}, %zmm3
+
+// CHECK: vpermd %zmm9, %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xc2,0x1d,0x40,0x36,0xf1]
+ vpermd %zmm9, %zmm28, %zmm22
+
+// CHECK: vpermd %zmm9, %zmm28, %zmm22 {%k1}
+// CHECK: encoding: [0x62,0xc2,0x1d,0x41,0x36,0xf1]
+ vpermd %zmm9, %zmm28, %zmm22 {%k1}
+
+// CHECK: vpermd %zmm9, %zmm28, %zmm22 {%k1} {z}
+// CHECK: encoding: [0x62,0xc2,0x1d,0xc1,0x36,0xf1]
+ vpermd %zmm9, %zmm28, %zmm22 {%k1} {z}
+
+// CHECK: vpermd (%rcx), %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x40,0x36,0x31]
+ vpermd (%rcx), %zmm28, %zmm22
+
+// CHECK: vpermd 291(%rax,%r14,8), %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xa2,0x1d,0x40,0x36,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpermd 291(%rax,%r14,8), %zmm28, %zmm22
+
+// CHECK: vpermd (%rcx){1to16}, %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x50,0x36,0x31]
+ vpermd (%rcx){1to16}, %zmm28, %zmm22
+
+// CHECK: vpermd 8128(%rdx), %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x40,0x36,0x72,0x7f]
+ vpermd 8128(%rdx), %zmm28, %zmm22
+
+// CHECK: vpermd 8192(%rdx), %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x40,0x36,0xb2,0x00,0x20,0x00,0x00]
+ vpermd 8192(%rdx), %zmm28, %zmm22
+
+// CHECK: vpermd -8192(%rdx), %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x40,0x36,0x72,0x80]
+ vpermd -8192(%rdx), %zmm28, %zmm22
+
+// CHECK: vpermd -8256(%rdx), %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x40,0x36,0xb2,0xc0,0xdf,0xff,0xff]
+ vpermd -8256(%rdx), %zmm28, %zmm22
+
+// CHECK: vpermd 508(%rdx){1to16}, %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x50,0x36,0x72,0x7f]
+ vpermd 508(%rdx){1to16}, %zmm28, %zmm22
+
+// CHECK: vpermd 512(%rdx){1to16}, %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x50,0x36,0xb2,0x00,0x02,0x00,0x00]
+ vpermd 512(%rdx){1to16}, %zmm28, %zmm22
+
+// CHECK: vpermd -512(%rdx){1to16}, %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x50,0x36,0x72,0x80]
+ vpermd -512(%rdx){1to16}, %zmm28, %zmm22
+
+// CHECK: vpermd -516(%rdx){1to16}, %zmm28, %zmm22
+// CHECK: encoding: [0x62,0xe2,0x1d,0x50,0x36,0xb2,0xfc,0xfd,0xff,0xff]
+ vpermd -516(%rdx){1to16}, %zmm28, %zmm22
+
+
diff --git a/test/MC/X86/hex-immediates.s b/test/MC/X86/hex-immediates.s
new file mode 100644
index 0000000000000..03f30423ae0a8
--- /dev/null
+++ b/test/MC/X86/hex-immediates.s
@@ -0,0 +1,10 @@
+# RUN: llvm-mc -filetype=obj %s -triple=x86_64-apple-darwin9 | llvm-objdump -d --print-imm-hex - | FileCheck %s
+
+# CHECK: movabsq $0x7fffffffffffffff, %rcx
+movabsq $0x7fffffffffffffff, %rcx
+# CHECK: leaq 0x3e2(%rip), %rdi
+leaq 0x3e2(%rip), %rdi
+# CHECK: subq $0x40, %rsp
+subq $0x40, %rsp
+# CHECK: leal (,%r14,4), %eax
+leal (,%r14,4), %eax
diff --git a/test/MC/X86/intel-syntax-avx512.s b/test/MC/X86/intel-syntax-avx512.s
index ffdbd20b77eaf..86a1af8bc16fd 100644
--- a/test/MC/X86/intel-syntax-avx512.s
+++ b/test/MC/X86/intel-syntax-avx512.s
@@ -161,15 +161,98 @@ vaddpd zmm1,zmm1,zmm2,{rz-sae}
vcmpps k2,zmm17,DWORD PTR [rdx-0x204]{1to16},0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, xmm28, 171
+// CHECK: encoding: [0x62,0x13,0x6d,0x00,0x55,0xfc,0xab]
+ vfixupimmss xmm15,xmm18,xmm28,0xab
+// CHECK: vfixupimmss xmm15 {k5}, xmm18, xmm28, 171
+// CHECK: encoding: [0x62,0x13,0x6d,0x05,0x55,0xfc,0xab]
+ vfixupimmss xmm15{k5},xmm18,xmm28,0xab
+// CHECK: vfixupimmss xmm15 {k5} {z}, xmm18, xmm28, 171
+// CHECK: encoding: [0x62,0x13,0x6d,0x85,0x55,0xfc,0xab]
+ vfixupimmss xmm15{k5} {z},xmm18,xmm28,0xab
+// CHECK: vfixupimmss xmm15 , xmm18, xmm28,{sae}, 171
+// CHECK: encoding: [0x62,0x13,0x6d,0x10,0x55,0xfc,0xab]
+ vfixupimmss xmm15,xmm18,xmm28,{sae},0xab
+// CHECK: vfixupimmss xmm15 , xmm18, xmm28, 123
+// CHECK: encoding: [0x62,0x13,0x6d,0x00,0x55,0xfc,0x7b]
+ vfixupimmss xmm15,xmm18,xmm28,0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, xmm28,{sae}, 123
+// CHECK: encoding: [0x62,0x13,0x6d,0x10,0x55,0xfc,0x7b]
+ vfixupimmss xmm15,xmm18,xmm28,{sae},0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, dword ptr [rcx], 123
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0x39,0x7b]
+ vfixupimmss xmm15,xmm18,DWORD PTR [rcx],0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, dword ptr [rax + 8*r14 + 291], 123
+// CHECK: encoding: [0x62,0x33,0x6d,0x00,0x55,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmss xmm15,xmm18,DWORD PTR [rax+r14*8+0x123],0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, dword ptr [rdx + 508], 123
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0x7a,0x7f,0x7b]
+ vfixupimmss xmm15,xmm18,DWORD PTR [rdx+0x1fc],0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, dword ptr [rdx + 512], 123
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0xba,0x00,0x02,0x00,0x00,0x7b]
+ vfixupimmss xmm15,xmm18,DWORD PTR [rdx+0x200],0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, dword ptr [rdx - 512], 123
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0x7a,0x80,0x7b]
+ vfixupimmss xmm15,xmm18,DWORD PTR [rdx-0x200],0x7b
+// CHECK: vfixupimmss xmm15 , xmm18, dword ptr [rdx - 516], 123
+// CHECK: encoding: [0x62,0x73,0x6d,0x00,0x55,0xba,0xfc,0xfd,0xff,0xff,0x7b]
+ vfixupimmss xmm15,xmm18,DWORD PTR [rdx-0x204],0x7b
+// CHECK: vfixupimmsd xmm13 , xmm26, xmm5, 171
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xed,0xab]
+ vfixupimmsd xmm13,xmm26,xmm5,0xab
+
+// CHECK: vfixupimmsd xmm13 {k6}, xmm26, xmm5, 171
+// CHECK: encoding: [0x62,0x73,0xad,0x06,0x55,0xed,0xab]
+ vfixupimmsd xmm13{k6},xmm26,xmm5,0xab
+
+// CHECK: vfixupimmsd xmm13 {k6} {z}, xmm26, xmm5, 171
+// CHECK: encoding: [0x62,0x73,0xad,0x86,0x55,0xed,0xab]
+ vfixupimmsd xmm13{k6} {z},xmm26,xmm5,0xab
+
+// CHECK: vfixupimmsd xmm13 , xmm26, xmm5,{sae}, 171
+// CHECK: encoding: [0x62,0x73,0xad,0x10,0x55,0xed,0xab]
+ vfixupimmsd xmm13,xmm26,xmm5,{sae},0xab
+
+// CHECK: vfixupimmsd xmm13 , xmm26, xmm5, 123
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xed,0x7b]
+ vfixupimmsd xmm13,xmm26,xmm5,0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, xmm5,{sae}, 123
+// CHECK: encoding: [0x62,0x73,0xad,0x10,0x55,0xed,0x7b]
+ vfixupimmsd xmm13,xmm26,xmm5,{sae},0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, qword ptr [rcx], 123
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0x29,0x7b]
+ vfixupimmsd xmm13,xmm26,QWORD PTR [rcx],0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, qword ptr [rax + 8*r14 + 291], 123
+// CHECK: encoding: [0x62,0x33,0xad,0x00,0x55,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmsd xmm13,xmm26,QWORD PTR [rax+r14*8+0x123],0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, qword ptr [rdx + 1016], 123
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0x6a,0x7f,0x7b]
+ vfixupimmsd xmm13,xmm26,QWORD PTR [rdx+0x3f8],0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, qword ptr [rdx + 1024], 123
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xaa,0x00,0x04,0x00,0x00,0x7b]
+ vfixupimmsd xmm13,xmm26,QWORD PTR [rdx+0x400],0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, qword ptr [rdx - 1024], 123
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0x6a,0x80,0x7b]
+ vfixupimmsd xmm13,xmm26,QWORD PTR [rdx-0x400],0x7b
+
+// CHECK: vfixupimmsd xmm13 , xmm26, qword ptr [rdx - 1032], 123
+// CHECK: encoding: [0x62,0x73,0xad,0x00,0x55,0xaa,0xf8,0xfb,0xff,0xff,0x7b]
+ vfixupimmsd xmm13,xmm26,QWORD PTR [rdx-0x408],0x7b
diff --git a/test/MC/X86/intel-syntax.s b/test/MC/X86/intel-syntax.s
index fce0c65781cd0..30fe6c8b9b158 100644
--- a/test/MC/X86/intel-syntax.s
+++ b/test/MC/X86/intel-syntax.s
@@ -662,3 +662,6 @@ frstor dword ptr [eax]
// CHECK: fnsave (%eax)
// CHECK: fxrstor (%eax)
// CHECK: frstor (%eax)
+
+// CHECK: cmpnless %xmm1, %xmm0
+cmpnless xmm0, xmm1
diff --git a/test/MC/X86/mpx-encodings.s b/test/MC/X86/mpx-encodings.s
new file mode 100644
index 0000000000000..6fe4e0f328ec6
--- /dev/null
+++ b/test/MC/X86/mpx-encodings.s
@@ -0,0 +1,38 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=knl --show-encoding %s | FileCheck %s
+
+// CHECK: bndmk (%rax), %bnd0
+// CHECK: encoding: [0xf3,0x48,0x0f,0x1b,0x00]
+bndmk (%rax), %bnd0
+
+// CHECK: bndmk 1024(%rax), %bnd1
+// CHECK: encoding: [0xf3,0x48,0x0f,0x1b,0x88,0x00,0x04,0x00,0x00]
+bndmk 1024(%rax), %bnd1
+
+// CHECK: bndmov %bnd2, %bnd1
+// CHECK: encoding: [0x66,0x0f,0x1b,0xd1]
+bndmov %bnd2, %bnd1
+
+// CHECK: bndmov %bnd1, 1024(%r9)
+// CHECK: encoding: [0x66,0x49,0x0f,0x1b,0x89,0x00,0x04,0x00,0x00]
+bndmov %bnd1, 1024(%r9)
+
+// CHECK: bndstx %bnd1, 1024(%rax)
+// CHECK: encoding: [0x0f,0x1b,0x88,0x00,0x04,0x00,0x00]
+bndstx %bnd1, 1024(%rax)
+
+// CHECK: bndldx 1024(%r8), %bnd1
+// CHECK: encoding: [0x41,0x0f,0x1a,0x88,0x00,0x04,0x00,0x00]
+bndldx 1024(%r8), %bnd1
+
+// CHECK: bndcl 121(%r10), %bnd1
+// CHECK: encoding: [0xf3,0x49,0x0f,0x1a,0x4a,0x79]
+bndcl 121(%r10), %bnd1
+
+// CHECK: bndcn 121(%rcx), %bnd3
+// CHECK: encoding: [0xf2,0x48,0x0f,0x1b,0x59,0x79]
+bndcn 121(%rcx), %bnd3
+
+// CHECK: bndcu %rdx, %bnd3
+// CHECK: encoding: [0xf2,0x48,0x0f,0x1a,0xda]
+bndcu %rdx, %bnd3
+
diff --git a/test/MC/X86/x86-64-avx512bw.s b/test/MC/X86/x86-64-avx512bw.s
index ba043da663293..b81e3adffd251 100644
--- a/test/MC/X86/x86-64-avx512bw.s
+++ b/test/MC/X86/x86-64-avx512bw.s
@@ -3308,3 +3308,38 @@
// CHECK: vpsubusw -8256(%rdx), %zmm20, %zmm20
// CHECK: encoding: [0x62,0xe1,0x5d,0x40,0xd9,0xa2,0xc0,0xdf,0xff,0xff]
vpsubusw -8256(%rdx), %zmm20, %zmm20
+// CHECK: vpermw %zmm21, %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xa2,0xe5,0x40,0x8d,0xf5]
+ vpermw %zmm21, %zmm19, %zmm22
+
+// CHECK: vpermw %zmm21, %zmm19, %zmm22 {%k6}
+// CHECK: encoding: [0x62,0xa2,0xe5,0x46,0x8d,0xf5]
+ vpermw %zmm21, %zmm19, %zmm22 {%k6}
+
+// CHECK: vpermw %zmm21, %zmm19, %zmm22 {%k6} {z}
+// CHECK: encoding: [0x62,0xa2,0xe5,0xc6,0x8d,0xf5]
+ vpermw %zmm21, %zmm19, %zmm22 {%k6} {z}
+
+// CHECK: vpermw (%rcx), %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xe2,0xe5,0x40,0x8d,0x31]
+ vpermw (%rcx), %zmm19, %zmm22
+
+// CHECK: vpermw 291(%rax,%r14,8), %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xa2,0xe5,0x40,0x8d,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpermw 291(%rax,%r14,8), %zmm19, %zmm22
+
+// CHECK: vpermw 8128(%rdx), %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xe2,0xe5,0x40,0x8d,0x72,0x7f]
+ vpermw 8128(%rdx), %zmm19, %zmm22
+
+// CHECK: vpermw 8192(%rdx), %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xe2,0xe5,0x40,0x8d,0xb2,0x00,0x20,0x00,0x00]
+ vpermw 8192(%rdx), %zmm19, %zmm22
+
+// CHECK: vpermw -8192(%rdx), %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xe2,0xe5,0x40,0x8d,0x72,0x80]
+ vpermw -8192(%rdx), %zmm19, %zmm22
+
+// CHECK: vpermw -8256(%rdx), %zmm19, %zmm22
+// CHECK: encoding: [0x62,0xe2,0xe5,0x40,0x8d,0xb2,0xc0,0xdf,0xff,0xff]
+ vpermw -8256(%rdx), %zmm19, %zmm22
diff --git a/test/MC/X86/x86-64-avx512bw_vl.s b/test/MC/X86/x86-64-avx512bw_vl.s
index bd16b0ed95c76..0ba5e17077bab 100644
--- a/test/MC/X86/x86-64-avx512bw_vl.s
+++ b/test/MC/X86/x86-64-avx512bw_vl.s
@@ -5775,3 +5775,164 @@
// CHECK: vpsubusw -4128(%rdx), %ymm25, %ymm27
// CHECK: encoding: [0x62,0x61,0x35,0x20,0xd9,0x9a,0xe0,0xef,0xff,0xff]
vpsubusw -4128(%rdx), %ymm25, %ymm27
+
+// CHECK: vpshufhw $171, %xmm19, %xmm23
+// CHECK: encoding: [0x62,0xa1,0xfe,0x08,0x70,0xfb,0xab]
+ vpshufhw $171, %xmm19, %xmm23
+
+// CHECK: vpshufhw $171, %xmm19, %xmm23 {%k7}
+// CHECK: encoding: [0x62,0xa1,0xfe,0x0f,0x70,0xfb,0xab]
+ vpshufhw $171, %xmm19, %xmm23 {%k7}
+
+// CHECK: vpshufhw $171, %xmm19, %xmm23 {%k7} {z}
+// CHECK: encoding: [0x62,0xa1,0xfe,0x8f,0x70,0xfb,0xab]
+ vpshufhw $171, %xmm19, %xmm23 {%k7} {z}
+
+// CHECK: vpshufhw $123, %xmm19, %xmm23
+// CHECK: encoding: [0x62,0xa1,0xfe,0x08,0x70,0xfb,0x7b]
+ vpshufhw $123, %xmm19, %xmm23
+
+// CHECK: vpshufhw $123, (%rcx), %xmm23
+// CHECK: encoding: [0x62,0xe1,0xfe,0x08,0x70,0x39,0x7b]
+ vpshufhw $123, (%rcx), %xmm23
+
+// CHECK: vpshufhw $123, 291(%rax,%r14,8), %xmm23
+// CHECK: encoding: [0x62,0xa1,0xfe,0x08,0x70,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshufhw $123, 291(%rax,%r14,8), %xmm23
+
+// CHECK: vpshufhw $123, 2032(%rdx), %xmm23
+// CHECK: encoding: [0x62,0xe1,0xfe,0x08,0x70,0x7a,0x7f,0x7b]
+ vpshufhw $123, 2032(%rdx), %xmm23
+
+// CHECK: vpshufhw $123, 2048(%rdx), %xmm23
+// CHECK: encoding: [0x62,0xe1,0xfe,0x08,0x70,0xba,0x00,0x08,0x00,0x00,0x7b]
+ vpshufhw $123, 2048(%rdx), %xmm23
+
+// CHECK: vpshufhw $123, -2048(%rdx), %xmm23
+// CHECK: encoding: [0x62,0xe1,0xfe,0x08,0x70,0x7a,0x80,0x7b]
+ vpshufhw $123, -2048(%rdx), %xmm23
+
+// CHECK: vpshufhw $123, -2064(%rdx), %xmm23
+// CHECK: encoding: [0x62,0xe1,0xfe,0x08,0x70,0xba,0xf0,0xf7,0xff,0xff,0x7b]
+ vpshufhw $123, -2064(%rdx), %xmm23
+
+// CHECK: vpshufhw $171, %ymm17, %ymm29
+// CHECK: encoding: [0x62,0x21,0xfe,0x28,0x70,0xe9,0xab]
+ vpshufhw $171, %ymm17, %ymm29
+
+// CHECK: vpshufhw $171, %ymm17, %ymm29 {%k7}
+// CHECK: encoding: [0x62,0x21,0xfe,0x2f,0x70,0xe9,0xab]
+ vpshufhw $171, %ymm17, %ymm29 {%k7}
+
+// CHECK: vpshufhw $171, %ymm17, %ymm29 {%k7} {z}
+// CHECK: encoding: [0x62,0x21,0xfe,0xaf,0x70,0xe9,0xab]
+ vpshufhw $171, %ymm17, %ymm29 {%k7} {z}
+
+// CHECK: vpshufhw $123, %ymm17, %ymm29
+// CHECK: encoding: [0x62,0x21,0xfe,0x28,0x70,0xe9,0x7b]
+ vpshufhw $123, %ymm17, %ymm29
+
+// CHECK: vpshufhw $123, (%rcx), %ymm29
+// CHECK: encoding: [0x62,0x61,0xfe,0x28,0x70,0x29,0x7b]
+ vpshufhw $123, (%rcx), %ymm29
+
+// CHECK: vpshufhw $123, 291(%rax,%r14,8), %ymm29
+// CHECK: encoding: [0x62,0x21,0xfe,0x28,0x70,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshufhw $123, 291(%rax,%r14,8), %ymm29
+
+// CHECK: vpshufhw $123, 4064(%rdx), %ymm29
+// CHECK: encoding: [0x62,0x61,0xfe,0x28,0x70,0x6a,0x7f,0x7b]
+ vpshufhw $123, 4064(%rdx), %ymm29
+
+// CHECK: vpshufhw $123, 4096(%rdx), %ymm29
+// CHECK: encoding: [0x62,0x61,0xfe,0x28,0x70,0xaa,0x00,0x10,0x00,0x00,0x7b]
+ vpshufhw $123, 4096(%rdx), %ymm29
+
+// CHECK: vpshufhw $123, -4096(%rdx), %ymm29
+// CHECK: encoding: [0x62,0x61,0xfe,0x28,0x70,0x6a,0x80,0x7b]
+ vpshufhw $123, -4096(%rdx), %ymm29
+
+// CHECK: vpshufhw $123, -4128(%rdx), %ymm29
+// CHECK: encoding: [0x62,0x61,0xfe,0x28,0x70,0xaa,0xe0,0xef,0xff,0xff,0x7b]
+ vpshufhw $123, -4128(%rdx), %ymm29
+
+// CHECK: vpshuflw $171, %xmm27, %xmm30
+// CHECK: encoding: [0x62,0x01,0xff,0x08,0x70,0xf3,0xab]
+ vpshuflw $171, %xmm27, %xmm30
+
+// CHECK: vpshuflw $171, %xmm27, %xmm30 {%k6}
+// CHECK: encoding: [0x62,0x01,0xff,0x0e,0x70,0xf3,0xab]
+ vpshuflw $171, %xmm27, %xmm30 {%k6}
+
+// CHECK: vpshuflw $171, %xmm27, %xmm30 {%k6} {z}
+// CHECK: encoding: [0x62,0x01,0xff,0x8e,0x70,0xf3,0xab]
+ vpshuflw $171, %xmm27, %xmm30 {%k6} {z}
+
+// CHECK: vpshuflw $123, %xmm27, %xmm30
+// CHECK: encoding: [0x62,0x01,0xff,0x08,0x70,0xf3,0x7b]
+ vpshuflw $123, %xmm27, %xmm30
+
+// CHECK: vpshuflw $123, (%rcx), %xmm30
+// CHECK: encoding: [0x62,0x61,0xff,0x08,0x70,0x31,0x7b]
+ vpshuflw $123, (%rcx), %xmm30
+
+// CHECK: vpshuflw $123, 291(%rax,%r14,8), %xmm30
+// CHECK: encoding: [0x62,0x21,0xff,0x08,0x70,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshuflw $123, 291(%rax,%r14,8), %xmm30
+
+// CHECK: vpshuflw $123, 2032(%rdx), %xmm30
+// CHECK: encoding: [0x62,0x61,0xff,0x08,0x70,0x72,0x7f,0x7b]
+ vpshuflw $123, 2032(%rdx), %xmm30
+
+// CHECK: vpshuflw $123, 2048(%rdx), %xmm30
+// CHECK: encoding: [0x62,0x61,0xff,0x08,0x70,0xb2,0x00,0x08,0x00,0x00,0x7b]
+ vpshuflw $123, 2048(%rdx), %xmm30
+
+// CHECK: vpshuflw $123, -2048(%rdx), %xmm30
+// CHECK: encoding: [0x62,0x61,0xff,0x08,0x70,0x72,0x80,0x7b]
+ vpshuflw $123, -2048(%rdx), %xmm30
+
+// CHECK: vpshuflw $123, -2064(%rdx), %xmm30
+// CHECK: encoding: [0x62,0x61,0xff,0x08,0x70,0xb2,0xf0,0xf7,0xff,0xff,0x7b]
+ vpshuflw $123, -2064(%rdx), %xmm30
+
+// CHECK: vpshuflw $171, %ymm25, %ymm25
+// CHECK: encoding: [0x62,0x01,0xff,0x28,0x70,0xc9,0xab]
+ vpshuflw $171, %ymm25, %ymm25
+
+// CHECK: vpshuflw $171, %ymm25, %ymm25 {%k5}
+// CHECK: encoding: [0x62,0x01,0xff,0x2d,0x70,0xc9,0xab]
+ vpshuflw $171, %ymm25, %ymm25 {%k5}
+
+// CHECK: vpshuflw $171, %ymm25, %ymm25 {%k5} {z}
+// CHECK: encoding: [0x62,0x01,0xff,0xad,0x70,0xc9,0xab]
+ vpshuflw $171, %ymm25, %ymm25 {%k5} {z}
+
+// CHECK: vpshuflw $123, %ymm25, %ymm25
+// CHECK: encoding: [0x62,0x01,0xff,0x28,0x70,0xc9,0x7b]
+ vpshuflw $123, %ymm25, %ymm25
+
+// CHECK: vpshuflw $123, (%rcx), %ymm25
+// CHECK: encoding: [0x62,0x61,0xff,0x28,0x70,0x09,0x7b]
+ vpshuflw $123, (%rcx), %ymm25
+
+// CHECK: vpshuflw $123, 291(%rax,%r14,8), %ymm25
+// CHECK: encoding: [0x62,0x21,0xff,0x28,0x70,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshuflw $123, 291(%rax,%r14,8), %ymm25
+
+// CHECK: vpshuflw $123, 4064(%rdx), %ymm25
+// CHECK: encoding: [0x62,0x61,0xff,0x28,0x70,0x4a,0x7f,0x7b]
+ vpshuflw $123, 4064(%rdx), %ymm25
+
+// CHECK: vpshuflw $123, 4096(%rdx), %ymm25
+// CHECK: encoding: [0x62,0x61,0xff,0x28,0x70,0x8a,0x00,0x10,0x00,0x00,0x7b]
+ vpshuflw $123, 4096(%rdx), %ymm25
+
+// CHECK: vpshuflw $123, -4096(%rdx), %ymm25
+// CHECK: encoding: [0x62,0x61,0xff,0x28,0x70,0x4a,0x80,0x7b]
+ vpshuflw $123, -4096(%rdx), %ymm25
+
+// CHECK: vpshuflw $123, -4128(%rdx), %ymm25
+// CHECK: encoding: [0x62,0x61,0xff,0x28,0x70,0x8a,0xe0,0xef,0xff,0xff,0x7b]
+ vpshuflw $123, -4128(%rdx), %ymm25
+
diff --git a/test/MC/X86/x86-64-avx512dq.s b/test/MC/X86/x86-64-avx512dq.s
index 7d33abd7deb1f..92656dd3405ef 100644
--- a/test/MC/X86/x86-64-avx512dq.s
+++ b/test/MC/X86/x86-64-avx512dq.s
@@ -1158,3 +1158,235 @@
// CHECK: vbroadcasti64x2 -2064(%rdx), %zmm20
// CHECK: encoding: [0x62,0xe2,0xfd,0x48,0x5a,0xa2,0xf0,0xf7,0xff,0xff]
vbroadcasti64x2 -2064(%rdx), %zmm20
+
+// CHECK: vrangepd $171, %zmm17, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xa3,0xe5,0x40,0x50,0xc9,0xab]
+ vrangepd $0xab, %zmm17, %zmm19, %zmm17
+
+// CHECK: vrangepd $171, %zmm17, %zmm19, %zmm17 {%k6}
+// CHECK: encoding: [0x62,0xa3,0xe5,0x46,0x50,0xc9,0xab]
+ vrangepd $0xab, %zmm17, %zmm19, %zmm17 {%k6}
+
+// CHECK: vrangepd $171, %zmm17, %zmm19, %zmm17 {%k6} {z}
+// CHECK: encoding: [0x62,0xa3,0xe5,0xc6,0x50,0xc9,0xab]
+ vrangepd $0xab, %zmm17, %zmm19, %zmm17 {%k6} {z}
+
+// CHECK: vrangepd $171,{sae}, %zmm17, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xa3,0xe5,0x10,0x50,0xc9,0xab]
+ vrangepd $0xab,{sae}, %zmm17, %zmm19, %zmm17
+
+// CHECK: vrangepd $123, %zmm17, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xa3,0xe5,0x40,0x50,0xc9,0x7b]
+ vrangepd $0x7b, %zmm17, %zmm19, %zmm17
+
+// CHECK: vrangepd $123,{sae}, %zmm17, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xa3,0xe5,0x10,0x50,0xc9,0x7b]
+ vrangepd $0x7b,{sae}, %zmm17, %zmm19, %zmm17
+
+// CHECK: vrangepd $123, (%rcx), %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x40,0x50,0x09,0x7b]
+ vrangepd $0x7b,(%rcx), %zmm19, %zmm17
+
+// CHECK: vrangepd $123, 291(%rax,%r14,8), %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xa3,0xe5,0x40,0x50,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangepd $0x7b,291(%rax,%r14,8), %zmm19, %zmm17
+
+// CHECK: vrangepd $123, (%rcx){1to8}, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x50,0x50,0x09,0x7b]
+ vrangepd $0x7b,(%rcx){1to8}, %zmm19, %zmm17
+
+// CHECK: vrangepd $123, 8128(%rdx), %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x40,0x50,0x4a,0x7f,0x7b]
+ vrangepd $0x7b,8128(%rdx), %zmm19, %zmm17
+
+// CHECK: vrangepd $123, 8192(%rdx), %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x40,0x50,0x8a,0x00,0x20,0x00,0x00,0x7b]
+ vrangepd $0x7b,8192(%rdx), %zmm19, %zmm17
+
+// CHECK: vrangepd $123, -8192(%rdx), %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x40,0x50,0x4a,0x80,0x7b]
+ vrangepd $0x7b,-8192(%rdx), %zmm19, %zmm17
+
+// CHECK: vrangepd $123, -8256(%rdx), %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x40,0x50,0x8a,0xc0,0xdf,0xff,0xff,0x7b]
+ vrangepd $0x7b,-8256(%rdx), %zmm19, %zmm17
+
+// CHECK: vrangepd $123, 1016(%rdx){1to8}, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x50,0x50,0x4a,0x7f,0x7b]
+ vrangepd $0x7b,1016(%rdx){1to8}, %zmm19, %zmm17
+
+// CHECK: vrangepd $123, 1024(%rdx){1to8}, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x50,0x50,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vrangepd $0x7b,1024(%rdx){1to8}, %zmm19, %zmm17
+
+// CHECK: vrangepd $123, -1024(%rdx){1to8}, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x50,0x50,0x4a,0x80,0x7b]
+ vrangepd $0x7b,-1024(%rdx){1to8}, %zmm19, %zmm17
+
+// CHECK: vrangepd $123, -1032(%rdx){1to8}, %zmm19, %zmm17
+// CHECK: encoding: [0x62,0xe3,0xe5,0x50,0x50,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vrangepd $0x7b,-1032(%rdx){1to8}, %zmm19, %zmm17
+
+// CHECK: vrangeps $171, %zmm17, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x23,0x55,0x40,0x50,0xc1,0xab]
+ vrangeps $0xab, %zmm17, %zmm21, %zmm24
+
+// CHECK: vrangeps $171, %zmm17, %zmm21, %zmm24 {%k6}
+// CHECK: encoding: [0x62,0x23,0x55,0x46,0x50,0xc1,0xab]
+ vrangeps $0xab, %zmm17, %zmm21, %zmm24 {%k6}
+
+// CHECK: vrangeps $171, %zmm17, %zmm21, %zmm24 {%k6} {z}
+// CHECK: encoding: [0x62,0x23,0x55,0xc6,0x50,0xc1,0xab]
+ vrangeps $0xab, %zmm17, %zmm21, %zmm24 {%k6} {z}
+
+// CHECK: vrangeps $171,{sae}, %zmm17, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x23,0x55,0x10,0x50,0xc1,0xab]
+ vrangeps $0xab,{sae}, %zmm17, %zmm21, %zmm24
+
+// CHECK: vrangeps $123, %zmm17, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x23,0x55,0x40,0x50,0xc1,0x7b]
+ vrangeps $0x7b, %zmm17, %zmm21, %zmm24
+
+// CHECK: vrangeps $123,{sae}, %zmm17, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x23,0x55,0x10,0x50,0xc1,0x7b]
+ vrangeps $0x7b,{sae}, %zmm17, %zmm21, %zmm24
+
+// CHECK: vrangeps $123, (%rcx), %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x40,0x50,0x01,0x7b]
+ vrangeps $0x7b,(%rcx), %zmm21, %zmm24
+
+// CHECK: vrangeps $123, 291(%rax,%r14,8), %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x23,0x55,0x40,0x50,0x84,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangeps $0x7b,291(%rax,%r14,8), %zmm21, %zmm24
+
+// CHECK: vrangeps $123, (%rcx){1to16}, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x50,0x50,0x01,0x7b]
+ vrangeps $0x7b,(%rcx){1to16}, %zmm21, %zmm24
+
+// CHECK: vrangeps $123, 8128(%rdx), %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x40,0x50,0x42,0x7f,0x7b]
+ vrangeps $0x7b,8128(%rdx), %zmm21, %zmm24
+
+// CHECK: vrangeps $123, 8192(%rdx), %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x40,0x50,0x82,0x00,0x20,0x00,0x00,0x7b]
+ vrangeps $0x7b,8192(%rdx), %zmm21, %zmm24
+
+// CHECK: vrangeps $123, -8192(%rdx), %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x40,0x50,0x42,0x80,0x7b]
+ vrangeps $0x7b,-8192(%rdx), %zmm21, %zmm24
+
+// CHECK: vrangeps $123, -8256(%rdx), %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x40,0x50,0x82,0xc0,0xdf,0xff,0xff,0x7b]
+ vrangeps $0x7b,-8256(%rdx), %zmm21, %zmm24
+
+// CHECK: vrangeps $123, 508(%rdx){1to16}, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x50,0x50,0x42,0x7f,0x7b]
+ vrangeps $0x7b,508(%rdx){1to16}, %zmm21, %zmm24
+
+// CHECK: vrangeps $123, 512(%rdx){1to16}, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x50,0x50,0x82,0x00,0x02,0x00,0x00,0x7b]
+ vrangeps $0x7b,512(%rdx){1to16}, %zmm21, %zmm24
+
+// CHECK: vrangeps $123, -512(%rdx){1to16}, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x50,0x50,0x42,0x80,0x7b]
+ vrangeps $0x7b,-512(%rdx){1to16}, %zmm21, %zmm24
+
+// CHECK: vrangeps $123, -516(%rdx){1to16}, %zmm21, %zmm24
+// CHECK: encoding: [0x62,0x63,0x55,0x50,0x50,0x82,0xfc,0xfd,0xff,0xff,0x7b]
+ vrangeps $0x7b,-516(%rdx){1to16}, %zmm21, %zmm24
+
+// CHECK: vrangesd $171, %xmm21, %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xa3,0xf5,0x00,0x51,0xcd,0xab]
+ vrangesd $0xab, %xmm21, %xmm17, %xmm17
+
+// CHECK: vrangesd $171, %xmm21, %xmm17, %xmm17 {%k5}
+// CHECK: encoding: [0x62,0xa3,0xf5,0x05,0x51,0xcd,0xab]
+ vrangesd $0xab, %xmm21, %xmm17, %xmm17 {%k5}
+
+// CHECK: vrangesd $171, %xmm21, %xmm17, %xmm17 {%k5} {z}
+// CHECK: encoding: [0x62,0xa3,0xf5,0x85,0x51,0xcd,0xab]
+ vrangesd $0xab, %xmm21, %xmm17, %xmm17 {%k5} {z}
+
+// CHECK: vrangesd $171,{sae}, %xmm21, %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xa3,0xf5,0x10,0x51,0xcd,0xab]
+ vrangesd $0xab,{sae}, %xmm21, %xmm17, %xmm17
+
+// CHECK: vrangesd $123, %xmm21, %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xa3,0xf5,0x00,0x51,0xcd,0x7b]
+ vrangesd $0x7b, %xmm21, %xmm17, %xmm17
+
+// CHECK: vrangesd $123,{sae}, %xmm21, %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xa3,0xf5,0x10,0x51,0xcd,0x7b]
+ vrangesd $0x7b,{sae}, %xmm21, %xmm17, %xmm17
+
+// CHECK: vrangesd $123, (%rcx), %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xe3,0xf5,0x00,0x51,0x09,0x7b]
+ vrangesd $0x7b,(%rcx), %xmm17, %xmm17
+
+// CHECK: vrangesd $123, 291(%rax,%r14,8), %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xa3,0xf5,0x00,0x51,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangesd $0x7b,291(%rax,%r14,8), %xmm17, %xmm17
+
+// CHECK: vrangesd $123, 1016(%rdx), %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xe3,0xf5,0x00,0x51,0x4a,0x7f,0x7b]
+ vrangesd $0x7b,1016(%rdx), %xmm17, %xmm17
+
+// CHECK: vrangesd $123, 1024(%rdx), %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xe3,0xf5,0x00,0x51,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vrangesd $0x7b,1024(%rdx), %xmm17, %xmm17
+
+// CHECK: vrangesd $123, -1024(%rdx), %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xe3,0xf5,0x00,0x51,0x4a,0x80,0x7b]
+ vrangesd $0x7b,-1024(%rdx), %xmm17, %xmm17
+
+// CHECK: vrangesd $123, -1032(%rdx), %xmm17, %xmm17
+// CHECK: encoding: [0x62,0xe3,0xf5,0x00,0x51,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vrangesd $0x7b,-1032(%rdx), %xmm17, %xmm17
+
+// CHECK: vrangess $171, %xmm20, %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x23,0x3d,0x00,0x51,0xcc,0xab]
+ vrangess $0xab, %xmm20, %xmm24, %xmm25
+
+// CHECK: vrangess $171, %xmm20, %xmm24, %xmm25 {%k5}
+// CHECK: encoding: [0x62,0x23,0x3d,0x05,0x51,0xcc,0xab]
+ vrangess $0xab, %xmm20, %xmm24, %xmm25 {%k5}
+
+// CHECK: vrangess $171, %xmm20, %xmm24, %xmm25 {%k5} {z}
+// CHECK: encoding: [0x62,0x23,0x3d,0x85,0x51,0xcc,0xab]
+ vrangess $0xab, %xmm20, %xmm24, %xmm25 {%k5} {z}
+
+// CHECK: vrangess $171,{sae}, %xmm20, %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x23,0x3d,0x10,0x51,0xcc,0xab]
+ vrangess $0xab,{sae}, %xmm20, %xmm24, %xmm25
+
+// CHECK: vrangess $123, %xmm20, %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x23,0x3d,0x00,0x51,0xcc,0x7b]
+ vrangess $0x7b, %xmm20, %xmm24, %xmm25
+
+// CHECK: vrangess $123,{sae}, %xmm20, %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x23,0x3d,0x10,0x51,0xcc,0x7b]
+ vrangess $0x7b,{sae}, %xmm20, %xmm24, %xmm25
+
+// CHECK: vrangess $123, (%rcx), %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x63,0x3d,0x00,0x51,0x09,0x7b]
+ vrangess $0x7b,(%rcx), %xmm24, %xmm25
+
+// CHECK: vrangess $123, 291(%rax,%r14,8), %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x23,0x3d,0x00,0x51,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangess $0x7b,291(%rax,%r14,8), %xmm24, %xmm25
+
+// CHECK: vrangess $123, 508(%rdx), %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x63,0x3d,0x00,0x51,0x4a,0x7f,0x7b]
+ vrangess $0x7b,508(%rdx), %xmm24, %xmm25
+
+// CHECK: vrangess $123, 512(%rdx), %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x63,0x3d,0x00,0x51,0x8a,0x00,0x02,0x00,0x00,0x7b]
+ vrangess $0x7b,512(%rdx), %xmm24, %xmm25
+
+// CHECK: vrangess $123, -512(%rdx), %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x63,0x3d,0x00,0x51,0x4a,0x80,0x7b]
+ vrangess $0x7b,-512(%rdx), %xmm24, %xmm25
+
+// CHECK: vrangess $123, -516(%rdx), %xmm24, %xmm25
+// CHECK: encoding: [0x62,0x63,0x3d,0x00,0x51,0x8a,0xfc,0xfd,0xff,0xff,0x7b]
+ vrangess $0x7b,-516(%rdx), %xmm24, %xmm25
diff --git a/test/MC/X86/x86-64-avx512dq_vl.s b/test/MC/X86/x86-64-avx512dq_vl.s
index 2de4564b5f51a..d14ae6ec3741b 100644
--- a/test/MC/X86/x86-64-avx512dq_vl.s
+++ b/test/MC/X86/x86-64-avx512dq_vl.s
@@ -1968,3 +1968,242 @@
// CHECK: vbroadcasti64x2 -2064(%rdx), %ymm18
// CHECK: encoding: [0x62,0xe2,0xfd,0x28,0x5a,0x92,0xf0,0xf7,0xff,0xff]
vbroadcasti64x2 -2064(%rdx), %ymm18
+// CHECK: vrangepd $171, %xmm26, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x03,0xa5,0x00,0x50,0xca,0xab]
+ vrangepd $0xab, %xmm26, %xmm27, %xmm25
+
+// CHECK: vrangepd $171, %xmm26, %xmm27, %xmm25 {%k6}
+// CHECK: encoding: [0x62,0x03,0xa5,0x06,0x50,0xca,0xab]
+ vrangepd $0xab, %xmm26, %xmm27, %xmm25 {%k6}
+
+// CHECK: vrangepd $171, %xmm26, %xmm27, %xmm25 {%k6} {z}
+// CHECK: encoding: [0x62,0x03,0xa5,0x86,0x50,0xca,0xab]
+ vrangepd $0xab, %xmm26, %xmm27, %xmm25 {%k6} {z}
+
+// CHECK: vrangepd $123, %xmm26, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x03,0xa5,0x00,0x50,0xca,0x7b]
+ vrangepd $0x7b, %xmm26, %xmm27, %xmm25
+
+// CHECK: vrangepd $123, (%rcx), %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x00,0x50,0x09,0x7b]
+ vrangepd $0x7b,(%rcx), %xmm27, %xmm25
+
+// CHECK: vrangepd $123, 291(%rax,%r14,8), %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x23,0xa5,0x00,0x50,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangepd $0x7b,291(%rax,%r14,8), %xmm27, %xmm25
+
+// CHECK: vrangepd $123, (%rcx){1to2}, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x10,0x50,0x09,0x7b]
+ vrangepd $0x7b,(%rcx){1to2}, %xmm27, %xmm25
+
+// CHECK: vrangepd $123, 2032(%rdx), %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x00,0x50,0x4a,0x7f,0x7b]
+ vrangepd $0x7b,2032(%rdx), %xmm27, %xmm25
+
+// CHECK: vrangepd $123, 2048(%rdx), %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x00,0x50,0x8a,0x00,0x08,0x00,0x00,0x7b]
+ vrangepd $0x7b,2048(%rdx), %xmm27, %xmm25
+
+// CHECK: vrangepd $123, -2048(%rdx), %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x00,0x50,0x4a,0x80,0x7b]
+ vrangepd $0x7b,-2048(%rdx), %xmm27, %xmm25
+
+// CHECK: vrangepd $123, -2064(%rdx), %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x00,0x50,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+ vrangepd $0x7b,-2064(%rdx), %xmm27, %xmm25
+
+// CHECK: vrangepd $123, 1016(%rdx){1to2}, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x10,0x50,0x4a,0x7f,0x7b]
+ vrangepd $0x7b,1016(%rdx){1to2}, %xmm27, %xmm25
+
+// CHECK: vrangepd $123, 1024(%rdx){1to2}, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x10,0x50,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vrangepd $0x7b,1024(%rdx){1to2}, %xmm27, %xmm25
+
+// CHECK: vrangepd $123, -1024(%rdx){1to2}, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x10,0x50,0x4a,0x80,0x7b]
+ vrangepd $0x7b,-1024(%rdx){1to2}, %xmm27, %xmm25
+
+// CHECK: vrangepd $123, -1032(%rdx){1to2}, %xmm27, %xmm25
+// CHECK: encoding: [0x62,0x63,0xa5,0x10,0x50,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vrangepd $0x7b,-1032(%rdx){1to2}, %xmm27, %xmm25
+
+// CHECK: vrangepd $171, %ymm28, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x03,0xa5,0x20,0x50,0xec,0xab]
+ vrangepd $0xab, %ymm28, %ymm27, %ymm29
+
+// CHECK: vrangepd $171, %ymm28, %ymm27, %ymm29 {%k7}
+// CHECK: encoding: [0x62,0x03,0xa5,0x27,0x50,0xec,0xab]
+ vrangepd $0xab, %ymm28, %ymm27, %ymm29 {%k7}
+
+// CHECK: vrangepd $171, %ymm28, %ymm27, %ymm29 {%k7} {z}
+// CHECK: encoding: [0x62,0x03,0xa5,0xa7,0x50,0xec,0xab]
+ vrangepd $0xab, %ymm28, %ymm27, %ymm29 {%k7} {z}
+
+// CHECK: vrangepd $123, %ymm28, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x03,0xa5,0x20,0x50,0xec,0x7b]
+ vrangepd $0x7b, %ymm28, %ymm27, %ymm29
+
+// CHECK: vrangepd $123, (%rcx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x20,0x50,0x29,0x7b]
+ vrangepd $0x7b,(%rcx), %ymm27, %ymm29
+
+// CHECK: vrangepd $123, 291(%rax,%r14,8), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x23,0xa5,0x20,0x50,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangepd $0x7b,291(%rax,%r14,8), %ymm27, %ymm29
+
+// CHECK: vrangepd $123, (%rcx){1to4}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x30,0x50,0x29,0x7b]
+ vrangepd $0x7b,(%rcx){1to4}, %ymm27, %ymm29
+
+// CHECK: vrangepd $123, 4064(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x20,0x50,0x6a,0x7f,0x7b]
+ vrangepd $0x7b,4064(%rdx), %ymm27, %ymm29
+
+// CHECK: vrangepd $123, 4096(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x20,0x50,0xaa,0x00,0x10,0x00,0x00,0x7b]
+ vrangepd $0x7b,4096(%rdx), %ymm27, %ymm29
+
+// CHECK: vrangepd $123, -4096(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x20,0x50,0x6a,0x80,0x7b]
+ vrangepd $0x7b,-4096(%rdx), %ymm27, %ymm29
+
+// CHECK: vrangepd $123, -4128(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x20,0x50,0xaa,0xe0,0xef,0xff,0xff,0x7b]
+ vrangepd $0x7b,-4128(%rdx), %ymm27, %ymm29
+
+// CHECK: vrangepd $123, 1016(%rdx){1to4}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x30,0x50,0x6a,0x7f,0x7b]
+ vrangepd $0x7b,1016(%rdx){1to4}, %ymm27, %ymm29
+
+// CHECK: vrangepd $123, 1024(%rdx){1to4}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x30,0x50,0xaa,0x00,0x04,0x00,0x00,0x7b]
+ vrangepd $0x7b,1024(%rdx){1to4}, %ymm27, %ymm29
+
+// CHECK: vrangepd $123, -1024(%rdx){1to4}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x30,0x50,0x6a,0x80,0x7b]
+ vrangepd $0x7b,-1024(%rdx){1to4}, %ymm27, %ymm29
+
+// CHECK: vrangepd $123, -1032(%rdx){1to4}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0xa5,0x30,0x50,0xaa,0xf8,0xfb,0xff,0xff,0x7b]
+ vrangepd $0x7b,-1032(%rdx){1to4}, %ymm27, %ymm29
+
+// CHECK: vrangeps $171, %xmm24, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x03,0x45,0x00,0x50,0xd8,0xab]
+ vrangeps $0xab, %xmm24, %xmm23, %xmm27
+
+// CHECK: vrangeps $171, %xmm24, %xmm23, %xmm27 {%k6}
+// CHECK: encoding: [0x62,0x03,0x45,0x06,0x50,0xd8,0xab]
+ vrangeps $0xab, %xmm24, %xmm23, %xmm27 {%k6}
+
+// CHECK: vrangeps $171, %xmm24, %xmm23, %xmm27 {%k6} {z}
+// CHECK: encoding: [0x62,0x03,0x45,0x86,0x50,0xd8,0xab]
+ vrangeps $0xab, %xmm24, %xmm23, %xmm27 {%k6} {z}
+
+// CHECK: vrangeps $123, %xmm24, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x03,0x45,0x00,0x50,0xd8,0x7b]
+ vrangeps $0x7b, %xmm24, %xmm23, %xmm27
+
+// CHECK: vrangeps $123, (%rcx), %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x00,0x50,0x19,0x7b]
+ vrangeps $0x7b,(%rcx), %xmm23, %xmm27
+
+// CHECK: vrangeps $123, 291(%rax,%r14,8), %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x23,0x45,0x00,0x50,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangeps $0x7b,291(%rax,%r14,8), %xmm23, %xmm27
+
+// CHECK: vrangeps $123, (%rcx){1to4}, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x10,0x50,0x19,0x7b]
+ vrangeps $0x7b,(%rcx){1to4}, %xmm23, %xmm27
+
+// CHECK: vrangeps $123, 2032(%rdx), %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x00,0x50,0x5a,0x7f,0x7b]
+ vrangeps $0x7b,2032(%rdx), %xmm23, %xmm27
+
+// CHECK: vrangeps $123, 2048(%rdx), %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x00,0x50,0x9a,0x00,0x08,0x00,0x00,0x7b]
+ vrangeps $0x7b,2048(%rdx), %xmm23, %xmm27
+
+// CHECK: vrangeps $123, -2048(%rdx), %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x00,0x50,0x5a,0x80,0x7b]
+ vrangeps $0x7b,-2048(%rdx), %xmm23, %xmm27
+
+// CHECK: vrangeps $123, -2064(%rdx), %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x00,0x50,0x9a,0xf0,0xf7,0xff,0xff,0x7b]
+ vrangeps $0x7b,-2064(%rdx), %xmm23, %xmm27
+
+// CHECK: vrangeps $123, 508(%rdx){1to4}, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x10,0x50,0x5a,0x7f,0x7b]
+ vrangeps $0x7b,508(%rdx){1to4}, %xmm23, %xmm27
+
+// CHECK: vrangeps $123, 512(%rdx){1to4}, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x10,0x50,0x9a,0x00,0x02,0x00,0x00,0x7b]
+ vrangeps $0x7b,512(%rdx){1to4}, %xmm23, %xmm27
+
+// CHECK: vrangeps $123, -512(%rdx){1to4}, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x10,0x50,0x5a,0x80,0x7b]
+ vrangeps $0x7b,-512(%rdx){1to4}, %xmm23, %xmm27
+
+// CHECK: vrangeps $123, -516(%rdx){1to4}, %xmm23, %xmm27
+// CHECK: encoding: [0x62,0x63,0x45,0x10,0x50,0x9a,0xfc,0xfd,0xff,0xff,0x7b]
+ vrangeps $0x7b,-516(%rdx){1to4}, %xmm23, %xmm27
+
+// CHECK: vrangeps $171, %ymm21, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x23,0x45,0x20,0x50,0xc5,0xab]
+ vrangeps $0xab, %ymm21, %ymm23, %ymm24
+
+// CHECK: vrangeps $171, %ymm21, %ymm23, %ymm24 {%k7}
+// CHECK: encoding: [0x62,0x23,0x45,0x27,0x50,0xc5,0xab]
+ vrangeps $0xab, %ymm21, %ymm23, %ymm24 {%k7}
+
+// CHECK: vrangeps $171, %ymm21, %ymm23, %ymm24 {%k7} {z}
+// CHECK: encoding: [0x62,0x23,0x45,0xa7,0x50,0xc5,0xab]
+ vrangeps $0xab, %ymm21, %ymm23, %ymm24 {%k7} {z}
+
+// CHECK: vrangeps $123, %ymm21, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x23,0x45,0x20,0x50,0xc5,0x7b]
+ vrangeps $0x7b, %ymm21, %ymm23, %ymm24
+
+// CHECK: vrangeps $123, (%rcx), %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x20,0x50,0x01,0x7b]
+ vrangeps $0x7b,(%rcx), %ymm23, %ymm24
+
+// CHECK: vrangeps $123, 291(%rax,%r14,8), %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x23,0x45,0x20,0x50,0x84,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vrangeps $0x7b,291(%rax,%r14,8), %ymm23, %ymm24
+
+// CHECK: vrangeps $123, (%rcx){1to8}, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x30,0x50,0x01,0x7b]
+ vrangeps $0x7b,(%rcx){1to8}, %ymm23, %ymm24
+
+// CHECK: vrangeps $123, 4064(%rdx), %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x20,0x50,0x42,0x7f,0x7b]
+ vrangeps $0x7b,4064(%rdx), %ymm23, %ymm24
+
+// CHECK: vrangeps $123, 4096(%rdx), %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x20,0x50,0x82,0x00,0x10,0x00,0x00,0x7b]
+ vrangeps $0x7b,4096(%rdx), %ymm23, %ymm24
+
+// CHECK: vrangeps $123, -4096(%rdx), %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x20,0x50,0x42,0x80,0x7b]
+ vrangeps $0x7b,-4096(%rdx), %ymm23, %ymm24
+
+// CHECK: vrangeps $123, -4128(%rdx), %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x20,0x50,0x82,0xe0,0xef,0xff,0xff,0x7b]
+ vrangeps $0x7b,-4128(%rdx), %ymm23, %ymm24
+
+// CHECK: vrangeps $123, 508(%rdx){1to8}, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x30,0x50,0x42,0x7f,0x7b]
+ vrangeps $0x7b,508(%rdx){1to8}, %ymm23, %ymm24
+
+// CHECK: vrangeps $123, 512(%rdx){1to8}, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x30,0x50,0x82,0x00,0x02,0x00,0x00,0x7b]
+ vrangeps $0x7b,512(%rdx){1to8}, %ymm23, %ymm24
+
+// CHECK: vrangeps $123, -512(%rdx){1to8}, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x30,0x50,0x42,0x80,0x7b]
+ vrangeps $0x7b,-512(%rdx){1to8}, %ymm23, %ymm24
+
+// CHECK: vrangeps $123, -516(%rdx){1to8}, %ymm23, %ymm24
+// CHECK: encoding: [0x62,0x63,0x45,0x30,0x50,0x82,0xfc,0xfd,0xff,0xff,0x7b]
+ vrangeps $0x7b,-516(%rdx){1to8}, %ymm23, %ymm24
diff --git a/test/MC/X86/x86-64-avx512f_vl.s b/test/MC/X86/x86-64-avx512f_vl.s
index 5007726817076..f521b3e42d442 100644
--- a/test/MC/X86/x86-64-avx512f_vl.s
+++ b/test/MC/X86/x86-64-avx512f_vl.s
@@ -10188,12 +10188,947 @@ vaddpd {rz-sae}, %zmm2, %zmm1, %zmm1
// CHECK: encoding: [0x62,0xe2,0x7d,0x28,0x5a,0x9a,0xf0,0xf7,0xff,0xff]
vbroadcasti32x4 -2064(%rdx), %ymm19
+
+// CHECK: vfixupimmps $171, %xmm17, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x23,0x75,0x00,0x54,0xc9,0xab]
+ vfixupimmps $0xab, %xmm17, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $171, %xmm17, %xmm17, %xmm25 {%k3}
+// CHECK: encoding: [0x62,0x23,0x75,0x03,0x54,0xc9,0xab]
+ vfixupimmps $0xab, %xmm17, %xmm17, %xmm25 {%k3}
+
+// CHECK: vfixupimmps $171, %xmm17, %xmm17, %xmm25 {%k3} {z}
+// CHECK: encoding: [0x62,0x23,0x75,0x83,0x54,0xc9,0xab]
+ vfixupimmps $0xab, %xmm17, %xmm17, %xmm25 {%k3} {z}
+
+// CHECK: vfixupimmps $123, %xmm17, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x23,0x75,0x00,0x54,0xc9,0x7b]
+ vfixupimmps $0x7b, %xmm17, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, (%rcx), %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x00,0x54,0x09,0x7b]
+ vfixupimmps $0x7b, (%rcx), %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, 291(%rax,%r14,8), %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x23,0x75,0x00,0x54,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 291(%rax,%r14,8), %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, (%rcx){1to4}, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x10,0x54,0x09,0x7b]
+ vfixupimmps $0x7b, (%rcx){1to4}, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, 2032(%rdx), %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x00,0x54,0x4a,0x7f,0x7b]
+ vfixupimmps $0x7b, 2032(%rdx), %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, 2048(%rdx), %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x00,0x54,0x8a,0x00,0x08,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 2048(%rdx), %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, -2048(%rdx), %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x00,0x54,0x4a,0x80,0x7b]
+ vfixupimmps $0x7b, -2048(%rdx), %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, -2064(%rdx), %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x00,0x54,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+ vfixupimmps $0x7b, -2064(%rdx), %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, 508(%rdx){1to4}, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x10,0x54,0x4a,0x7f,0x7b]
+ vfixupimmps $0x7b, 508(%rdx){1to4}, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, 512(%rdx){1to4}, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x10,0x54,0x8a,0x00,0x02,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 512(%rdx){1to4}, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, -512(%rdx){1to4}, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x10,0x54,0x4a,0x80,0x7b]
+ vfixupimmps $0x7b, -512(%rdx){1to4}, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $123, -516(%rdx){1to4}, %xmm17, %xmm25
+// CHECK: encoding: [0x62,0x63,0x75,0x10,0x54,0x8a,0xfc,0xfd,0xff,0xff,0x7b]
+ vfixupimmps $0x7b, -516(%rdx){1to4}, %xmm17, %xmm25
+
+// CHECK: vfixupimmps $171, %ymm28, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x03,0x55,0x20,0x54,0xf4,0xab]
+ vfixupimmps $0xab, %ymm28, %ymm21, %ymm30
+
+// CHECK: vfixupimmps $171, %ymm28, %ymm21, %ymm30 {%k4}
+// CHECK: encoding: [0x62,0x03,0x55,0x24,0x54,0xf4,0xab]
+ vfixupimmps $0xab, %ymm28, %ymm21, %ymm30 {%k4}
+
+// CHECK: vfixupimmps $171, %ymm28, %ymm21, %ymm30 {%k4} {z}
+// CHECK: encoding: [0x62,0x03,0x55,0xa4,0x54,0xf4,0xab]
+ vfixupimmps $0xab, %ymm28, %ymm21, %ymm30 {%k4} {z}
+
+// CHECK: vfixupimmps $123, %ymm28, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x03,0x55,0x20,0x54,0xf4,0x7b]
+ vfixupimmps $0x7b, %ymm28, %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, (%rcx), %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x54,0x31,0x7b]
+ vfixupimmps $0x7b, (%rcx), %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, 291(%rax,%r14,8), %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x23,0x55,0x20,0x54,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 291(%rax,%r14,8), %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, (%rcx){1to8}, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x30,0x54,0x31,0x7b]
+ vfixupimmps $0x7b, (%rcx){1to8}, %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, 4064(%rdx), %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x54,0x72,0x7f,0x7b]
+ vfixupimmps $0x7b, 4064(%rdx), %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, 4096(%rdx), %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x54,0xb2,0x00,0x10,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 4096(%rdx), %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, -4096(%rdx), %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x54,0x72,0x80,0x7b]
+ vfixupimmps $0x7b, -4096(%rdx), %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, -4128(%rdx), %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x20,0x54,0xb2,0xe0,0xef,0xff,0xff,0x7b]
+ vfixupimmps $0x7b, -4128(%rdx), %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, 508(%rdx){1to8}, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x30,0x54,0x72,0x7f,0x7b]
+ vfixupimmps $0x7b, 508(%rdx){1to8}, %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, 512(%rdx){1to8}, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x30,0x54,0xb2,0x00,0x02,0x00,0x00,0x7b]
+ vfixupimmps $0x7b, 512(%rdx){1to8}, %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, -512(%rdx){1to8}, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x30,0x54,0x72,0x80,0x7b]
+ vfixupimmps $0x7b, -512(%rdx){1to8}, %ymm21, %ymm30
+
+// CHECK: vfixupimmps $123, -516(%rdx){1to8}, %ymm21, %ymm30
+// CHECK: encoding: [0x62,0x63,0x55,0x30,0x54,0xb2,0xfc,0xfd,0xff,0xff,0x7b]
+ vfixupimmps $0x7b, -516(%rdx){1to8}, %ymm21, %ymm30
+
+// CHECK: vfixupimmpd $171, %xmm25, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x03,0xed,0x00,0x54,0xc1,0xab]
+ vfixupimmpd $0xab, %xmm25, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $171, %xmm25, %xmm18, %xmm24 {%k3}
+// CHECK: encoding: [0x62,0x03,0xed,0x03,0x54,0xc1,0xab]
+ vfixupimmpd $0xab, %xmm25, %xmm18, %xmm24 {%k3}
+
+// CHECK: vfixupimmpd $171, %xmm25, %xmm18, %xmm24 {%k3} {z}
+// CHECK: encoding: [0x62,0x03,0xed,0x83,0x54,0xc1,0xab]
+ vfixupimmpd $0xab, %xmm25, %xmm18, %xmm24 {%k3} {z}
+
+// CHECK: vfixupimmpd $123, %xmm25, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x03,0xed,0x00,0x54,0xc1,0x7b]
+ vfixupimmpd $0x7b, %xmm25, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, (%rcx), %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x00,0x54,0x01,0x7b]
+ vfixupimmpd $0x7b, (%rcx), %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, 291(%rax,%r14,8), %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x23,0xed,0x00,0x54,0x84,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 291(%rax,%r14,8), %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, (%rcx){1to2}, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x10,0x54,0x01,0x7b]
+ vfixupimmpd $0x7b, (%rcx){1to2}, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, 2032(%rdx), %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x00,0x54,0x42,0x7f,0x7b]
+ vfixupimmpd $0x7b, 2032(%rdx), %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, 2048(%rdx), %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x00,0x54,0x82,0x00,0x08,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 2048(%rdx), %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, -2048(%rdx), %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x00,0x54,0x42,0x80,0x7b]
+ vfixupimmpd $0x7b, -2048(%rdx), %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, -2064(%rdx), %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x00,0x54,0x82,0xf0,0xf7,0xff,0xff,0x7b]
+ vfixupimmpd $0x7b, -2064(%rdx), %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, 1016(%rdx){1to2}, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x10,0x54,0x42,0x7f,0x7b]
+ vfixupimmpd $0x7b, 1016(%rdx){1to2}, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, 1024(%rdx){1to2}, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x10,0x54,0x82,0x00,0x04,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 1024(%rdx){1to2}, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, -1024(%rdx){1to2}, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x10,0x54,0x42,0x80,0x7b]
+ vfixupimmpd $0x7b, -1024(%rdx){1to2}, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $123, -1032(%rdx){1to2}, %xmm18, %xmm24
+// CHECK: encoding: [0x62,0x63,0xed,0x10,0x54,0x82,0xf8,0xfb,0xff,0xff,0x7b]
+ vfixupimmpd $0x7b, -1032(%rdx){1to2}, %xmm18, %xmm24
+
+// CHECK: vfixupimmpd $171, %ymm28, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0x83,0xcd,0x20,0x54,0xd4,0xab]
+ vfixupimmpd $0xab, %ymm28, %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $171, %ymm28, %ymm22, %ymm18 {%k2}
+// CHECK: encoding: [0x62,0x83,0xcd,0x22,0x54,0xd4,0xab]
+ vfixupimmpd $0xab, %ymm28, %ymm22, %ymm18 {%k2}
+
+// CHECK: vfixupimmpd $171, %ymm28, %ymm22, %ymm18 {%k2} {z}
+// CHECK: encoding: [0x62,0x83,0xcd,0xa2,0x54,0xd4,0xab]
+ vfixupimmpd $0xab, %ymm28, %ymm22, %ymm18 {%k2} {z}
+
+// CHECK: vfixupimmpd $123, %ymm28, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0x83,0xcd,0x20,0x54,0xd4,0x7b]
+ vfixupimmpd $0x7b, %ymm28, %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, (%rcx), %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x20,0x54,0x11,0x7b]
+ vfixupimmpd $0x7b, (%rcx), %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, 291(%rax,%r14,8), %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xa3,0xcd,0x20,0x54,0x94,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 291(%rax,%r14,8), %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, (%rcx){1to4}, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x30,0x54,0x11,0x7b]
+ vfixupimmpd $0x7b, (%rcx){1to4}, %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, 4064(%rdx), %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x20,0x54,0x52,0x7f,0x7b]
+ vfixupimmpd $0x7b, 4064(%rdx), %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, 4096(%rdx), %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x20,0x54,0x92,0x00,0x10,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 4096(%rdx), %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, -4096(%rdx), %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x20,0x54,0x52,0x80,0x7b]
+ vfixupimmpd $0x7b, -4096(%rdx), %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, -4128(%rdx), %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x20,0x54,0x92,0xe0,0xef,0xff,0xff,0x7b]
+ vfixupimmpd $0x7b, -4128(%rdx), %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, 1016(%rdx){1to4}, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x30,0x54,0x52,0x7f,0x7b]
+ vfixupimmpd $0x7b, 1016(%rdx){1to4}, %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, 1024(%rdx){1to4}, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x30,0x54,0x92,0x00,0x04,0x00,0x00,0x7b]
+ vfixupimmpd $0x7b, 1024(%rdx){1to4}, %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, -1024(%rdx){1to4}, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x30,0x54,0x52,0x80,0x7b]
+ vfixupimmpd $0x7b, -1024(%rdx){1to4}, %ymm22, %ymm18
+
+// CHECK: vfixupimmpd $123, -1032(%rdx){1to4}, %ymm22, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xcd,0x30,0x54,0x92,0xf8,0xfb,0xff,0xff,0x7b]
+ vfixupimmpd $0x7b, -1032(%rdx){1to4}, %ymm22, %ymm18
+
+// CHECK: vpshufd $171, %xmm23, %xmm17
+// CHECK: encoding: [0x62,0xa1,0x7d,0x08,0x70,0xcf,0xab]
+ vpshufd $171, %xmm23, %xmm17
+
+// CHECK: vpshufd $171, %xmm23, %xmm17 {%k1}
+// CHECK: encoding: [0x62,0xa1,0x7d,0x09,0x70,0xcf,0xab]
+ vpshufd $171, %xmm23, %xmm17 {%k1}
+
+// CHECK: vpshufd $171, %xmm23, %xmm17 {%k1} {z}
+// CHECK: encoding: [0x62,0xa1,0x7d,0x89,0x70,0xcf,0xab]
+ vpshufd $171, %xmm23, %xmm17 {%k1} {z}
+
+// CHECK: vpshufd $123, %xmm23, %xmm17
+// CHECK: encoding: [0x62,0xa1,0x7d,0x08,0x70,0xcf,0x7b]
+ vpshufd $123, %xmm23, %xmm17
+
+// CHECK: vpshufd $123, (%rcx), %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x08,0x70,0x09,0x7b]
+ vpshufd $123, (%rcx), %xmm17
+
+// CHECK: vpshufd $123, 291(%rax,%r14,8), %xmm17
+// CHECK: encoding: [0x62,0xa1,0x7d,0x08,0x70,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshufd $123, 291(%rax,%r14,8), %xmm17
+
+// CHECK: vpshufd $123, 2032(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x08,0x70,0x4a,0x7f,0x7b]
+ vpshufd $123, 2032(%rdx), %xmm17
+
+// CHECK: vpshufd $123, (%rcx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x18,0x70,0x09,0x7b]
+ vpshufd $123, (%rcx){1to4}, %xmm17
+
+// CHECK: vpshufd $123, 2048(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x08,0x70,0x8a,0x00,0x08,0x00,0x00,0x7b]
+ vpshufd $123, 2048(%rdx), %xmm17
+
+// CHECK: vpshufd $123, -2048(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x08,0x70,0x4a,0x80,0x7b]
+ vpshufd $123, -2048(%rdx), %xmm17
+
+// CHECK: vpshufd $123, -2064(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x08,0x70,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+ vpshufd $123, -2064(%rdx), %xmm17
+
+// CHECK: vpshufd $123, 508(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x18,0x70,0x4a,0x7f,0x7b]
+ vpshufd $123, 508(%rdx){1to4}, %xmm17
+
+// CHECK: vpshufd $123, 512(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x18,0x70,0x8a,0x00,0x02,0x00,0x00,0x7b]
+ vpshufd $123, 512(%rdx){1to4}, %xmm17
+
+// CHECK: vpshufd $123, -512(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x18,0x70,0x4a,0x80,0x7b]
+ vpshufd $123, -512(%rdx){1to4}, %xmm17
+
+// CHECK: vpshufd $123, -516(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe1,0x7d,0x18,0x70,0x8a,0xfc,0xfd,0xff,0xff,0x7b]
+ vpshufd $123, -516(%rdx){1to4}, %xmm17
+
+// CHECK: vpshufd $171, %ymm22, %ymm20
+// CHECK: encoding: [0x62,0xa1,0x7d,0x28,0x70,0xe6,0xab]
+ vpshufd $171, %ymm22, %ymm20
+
+// CHECK: vpshufd $171, %ymm22, %ymm20 {%k2}
+// CHECK: encoding: [0x62,0xa1,0x7d,0x2a,0x70,0xe6,0xab]
+ vpshufd $171, %ymm22, %ymm20 {%k2}
+
+// CHECK: vpshufd $171, %ymm22, %ymm20 {%k2} {z}
+// CHECK: encoding: [0x62,0xa1,0x7d,0xaa,0x70,0xe6,0xab]
+ vpshufd $171, %ymm22, %ymm20 {%k2} {z}
+
+// CHECK: vpshufd $123, %ymm22, %ymm20
+// CHECK: encoding: [0x62,0xa1,0x7d,0x28,0x70,0xe6,0x7b]
+ vpshufd $123, %ymm22, %ymm20
+
+// CHECK: vpshufd $123, (%rcx), %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x28,0x70,0x21,0x7b]
+ vpshufd $123, (%rcx), %ymm20
+
+// CHECK: vpshufd $123, 291(%rax,%r14,8), %ymm20
+// CHECK: encoding: [0x62,0xa1,0x7d,0x28,0x70,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpshufd $123, 291(%rax,%r14,8), %ymm20
+
+// CHECK: vpshufd $123, (%rcx){1to8}, %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x38,0x70,0x21,0x7b]
+ vpshufd $123, (%rcx){1to8}, %ymm20
+
+// CHECK: vpshufd $123, 4064(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x28,0x70,0x62,0x7f,0x7b]
+ vpshufd $123, 4064(%rdx), %ymm20
+
+// CHECK: vpshufd $123, 4096(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x28,0x70,0xa2,0x00,0x10,0x00,0x00,0x7b]
+ vpshufd $123, 4096(%rdx), %ymm20
+
+// CHECK: vpshufd $123, -4096(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x28,0x70,0x62,0x80,0x7b]
+ vpshufd $123, -4096(%rdx), %ymm20
+
+// CHECK: vpshufd $123, -4128(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x28,0x70,0xa2,0xe0,0xef,0xff,0xff,0x7b]
+ vpshufd $123, -4128(%rdx), %ymm20
+
+// CHECK: vpshufd $123, 508(%rdx){1to8}, %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x38,0x70,0x62,0x7f,0x7b]
+ vpshufd $123, 508(%rdx){1to8}, %ymm20
+
+// CHECK: vpshufd $123, 512(%rdx){1to8}, %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x38,0x70,0xa2,0x00,0x02,0x00,0x00,0x7b]
+ vpshufd $123, 512(%rdx){1to8}, %ymm20
+
+// CHECK: vpshufd $123, -512(%rdx){1to8}, %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x38,0x70,0x62,0x80,0x7b]
+ vpshufd $123, -512(%rdx){1to8}, %ymm20
+
+// CHECK: vpshufd $123, -516(%rdx){1to8}, %ymm20
+// CHECK: encoding: [0x62,0xe1,0x7d,0x38,0x70,0xa2,0xfc,0xfd,0xff,0xff,0x7b]
+ vpshufd $123, -516(%rdx){1to8}, %ymm20
+
+// CHECK: vgetexppd %xmm18, %xmm17
+// CHECK: encoding: [0x62,0xa2,0xfd,0x08,0x42,0xca]
+ vgetexppd %xmm18, %xmm17
+
+// CHECK: vgetexppd %xmm18, %xmm17 {%k1}
+// CHECK: encoding: [0x62,0xa2,0xfd,0x09,0x42,0xca]
+ vgetexppd %xmm18, %xmm17 {%k1}
+
+// CHECK: vgetexppd %xmm18, %xmm17 {%k1} {z}
+// CHECK: encoding: [0x62,0xa2,0xfd,0x89,0x42,0xca]
+ vgetexppd %xmm18, %xmm17 {%k1} {z}
+
+// CHECK: vgetexppd (%rcx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x08,0x42,0x09]
+ vgetexppd (%rcx), %xmm17
+
+// CHECK: vgetexppd 291(%rax,%r14,8), %xmm17
+// CHECK: encoding: [0x62,0xa2,0xfd,0x08,0x42,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vgetexppd 291(%rax,%r14,8), %xmm17
+
+// CHECK: vgetexppd (%rcx){1to2}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x18,0x42,0x09]
+ vgetexppd (%rcx){1to2}, %xmm17
+
+// CHECK: vgetexppd 2032(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x08,0x42,0x4a,0x7f]
+ vgetexppd 2032(%rdx), %xmm17
+
+// CHECK: vgetexppd 2048(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x08,0x42,0x8a,0x00,0x08,0x00,0x00]
+ vgetexppd 2048(%rdx), %xmm17
+
+// CHECK: vgetexppd -2048(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x08,0x42,0x4a,0x80]
+ vgetexppd -2048(%rdx), %xmm17
+
+// CHECK: vgetexppd -2064(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x08,0x42,0x8a,0xf0,0xf7,0xff,0xff]
+ vgetexppd -2064(%rdx), %xmm17
+
+// CHECK: vgetexppd 1016(%rdx){1to2}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x18,0x42,0x4a,0x7f]
+ vgetexppd 1016(%rdx){1to2}, %xmm17
+
+// CHECK: vgetexppd 1024(%rdx){1to2}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x18,0x42,0x8a,0x00,0x04,0x00,0x00]
+ vgetexppd 1024(%rdx){1to2}, %xmm17
+
+// CHECK: vgetexppd -1024(%rdx){1to2}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x18,0x42,0x4a,0x80]
+ vgetexppd -1024(%rdx){1to2}, %xmm17
+
+// CHECK: vgetexppd -1032(%rdx){1to2}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0xfd,0x18,0x42,0x8a,0xf8,0xfb,0xff,0xff]
+ vgetexppd -1032(%rdx){1to2}, %xmm17
+
+// CHECK: vgetexppd %ymm17, %ymm20
+// CHECK: encoding: [0x62,0xa2,0xfd,0x28,0x42,0xe1]
+ vgetexppd %ymm17, %ymm20
+
+// CHECK: vgetexppd %ymm17, %ymm20 {%k3}
+// CHECK: encoding: [0x62,0xa2,0xfd,0x2b,0x42,0xe1]
+ vgetexppd %ymm17, %ymm20 {%k3}
+
+// CHECK: vgetexppd %ymm17, %ymm20 {%k3} {z}
+// CHECK: encoding: [0x62,0xa2,0xfd,0xab,0x42,0xe1]
+ vgetexppd %ymm17, %ymm20 {%k3} {z}
+
+// CHECK: vgetexppd (%rcx), %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x28,0x42,0x21]
+ vgetexppd (%rcx), %ymm20
+
+// CHECK: vgetexppd 291(%rax,%r14,8), %ymm20
+// CHECK: encoding: [0x62,0xa2,0xfd,0x28,0x42,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vgetexppd 291(%rax,%r14,8), %ymm20
+
+// CHECK: vgetexppd (%rcx){1to4}, %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x38,0x42,0x21]
+ vgetexppd (%rcx){1to4}, %ymm20
+
+// CHECK: vgetexppd 4064(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x28,0x42,0x62,0x7f]
+ vgetexppd 4064(%rdx), %ymm20
+
+// CHECK: vgetexppd 4096(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x28,0x42,0xa2,0x00,0x10,0x00,0x00]
+ vgetexppd 4096(%rdx), %ymm20
+
+// CHECK: vgetexppd -4096(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x28,0x42,0x62,0x80]
+ vgetexppd -4096(%rdx), %ymm20
+
+// CHECK: vgetexppd -4128(%rdx), %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x28,0x42,0xa2,0xe0,0xef,0xff,0xff]
+ vgetexppd -4128(%rdx), %ymm20
+
+// CHECK: vgetexppd 1016(%rdx){1to4}, %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x38,0x42,0x62,0x7f]
+ vgetexppd 1016(%rdx){1to4}, %ymm20
+
+// CHECK: vgetexppd 1024(%rdx){1to4}, %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x38,0x42,0xa2,0x00,0x04,0x00,0x00]
+ vgetexppd 1024(%rdx){1to4}, %ymm20
+
+// CHECK: vgetexppd -1024(%rdx){1to4}, %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x38,0x42,0x62,0x80]
+ vgetexppd -1024(%rdx){1to4}, %ymm20
+
+// CHECK: vgetexppd -1032(%rdx){1to4}, %ymm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x38,0x42,0xa2,0xf8,0xfb,0xff,0xff]
+ vgetexppd -1032(%rdx){1to4}, %ymm20
+
+// CHECK: vgetexpps %xmm27, %xmm17
+// CHECK: encoding: [0x62,0x82,0x7d,0x08,0x42,0xcb]
+ vgetexpps %xmm27, %xmm17
+
+// CHECK: vgetexpps %xmm27, %xmm17 {%k2}
+// CHECK: encoding: [0x62,0x82,0x7d,0x0a,0x42,0xcb]
+ vgetexpps %xmm27, %xmm17 {%k2}
+
+// CHECK: vgetexpps %xmm27, %xmm17 {%k2} {z}
+// CHECK: encoding: [0x62,0x82,0x7d,0x8a,0x42,0xcb]
+ vgetexpps %xmm27, %xmm17 {%k2} {z}
+
+// CHECK: vgetexpps (%rcx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x08,0x42,0x09]
+ vgetexpps (%rcx), %xmm17
+
+// CHECK: vgetexpps 291(%rax,%r14,8), %xmm17
+// CHECK: encoding: [0x62,0xa2,0x7d,0x08,0x42,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vgetexpps 291(%rax,%r14,8), %xmm17
+
+// CHECK: vgetexpps (%rcx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x18,0x42,0x09]
+ vgetexpps (%rcx){1to4}, %xmm17
+
+// CHECK: vgetexpps 2032(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x08,0x42,0x4a,0x7f]
+ vgetexpps 2032(%rdx), %xmm17
+
+// CHECK: vgetexpps 2048(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x08,0x42,0x8a,0x00,0x08,0x00,0x00]
+ vgetexpps 2048(%rdx), %xmm17
+
+// CHECK: vgetexpps -2048(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x08,0x42,0x4a,0x80]
+ vgetexpps -2048(%rdx), %xmm17
+
+// CHECK: vgetexpps -2064(%rdx), %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x08,0x42,0x8a,0xf0,0xf7,0xff,0xff]
+ vgetexpps -2064(%rdx), %xmm17
+
+// CHECK: vgetexpps 508(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x18,0x42,0x4a,0x7f]
+ vgetexpps 508(%rdx){1to4}, %xmm17
+
+// CHECK: vgetexpps 512(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x18,0x42,0x8a,0x00,0x02,0x00,0x00]
+ vgetexpps 512(%rdx){1to4}, %xmm17
+
+// CHECK: vgetexpps -512(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x18,0x42,0x4a,0x80]
+ vgetexpps -512(%rdx){1to4}, %xmm17
+
+// CHECK: vgetexpps -516(%rdx){1to4}, %xmm17
+// CHECK: encoding: [0x62,0xe2,0x7d,0x18,0x42,0x8a,0xfc,0xfd,0xff,0xff]
+ vgetexpps -516(%rdx){1to4}, %xmm17
+
+// CHECK: vgetexpps %ymm29, %ymm30
+// CHECK: encoding: [0x62,0x02,0x7d,0x28,0x42,0xf5]
+ vgetexpps %ymm29, %ymm30
+
+// CHECK: vgetexpps %ymm29, %ymm30 {%k6}
+// CHECK: encoding: [0x62,0x02,0x7d,0x2e,0x42,0xf5]
+ vgetexpps %ymm29, %ymm30 {%k6}
+
+// CHECK: vgetexpps %ymm29, %ymm30 {%k6} {z}
+// CHECK: encoding: [0x62,0x02,0x7d,0xae,0x42,0xf5]
+ vgetexpps %ymm29, %ymm30 {%k6} {z}
+
+// CHECK: vgetexpps (%rcx), %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x28,0x42,0x31]
+ vgetexpps (%rcx), %ymm30
+
+// CHECK: vgetexpps 291(%rax,%r14,8), %ymm30
+// CHECK: encoding: [0x62,0x22,0x7d,0x28,0x42,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vgetexpps 291(%rax,%r14,8), %ymm30
+
+// CHECK: vgetexpps (%rcx){1to8}, %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x38,0x42,0x31]
+ vgetexpps (%rcx){1to8}, %ymm30
+
+// CHECK: vgetexpps 4064(%rdx), %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x28,0x42,0x72,0x7f]
+ vgetexpps 4064(%rdx), %ymm30
+
+// CHECK: vgetexpps 4096(%rdx), %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x28,0x42,0xb2,0x00,0x10,0x00,0x00]
+ vgetexpps 4096(%rdx), %ymm30
+
+// CHECK: vgetexpps -4096(%rdx), %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x28,0x42,0x72,0x80]
+ vgetexpps -4096(%rdx), %ymm30
+
+// CHECK: vgetexpps -4128(%rdx), %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x28,0x42,0xb2,0xe0,0xef,0xff,0xff]
+ vgetexpps -4128(%rdx), %ymm30
+
+// CHECK: vgetexpps 508(%rdx){1to8}, %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x38,0x42,0x72,0x7f]
+ vgetexpps 508(%rdx){1to8}, %ymm30
+
+// CHECK: vgetexpps 512(%rdx){1to8}, %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x38,0x42,0xb2,0x00,0x02,0x00,0x00]
+ vgetexpps 512(%rdx){1to8}, %ymm30
+
+// CHECK: vgetexpps -512(%rdx){1to8}, %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x38,0x42,0x72,0x80]
+ vgetexpps -512(%rdx){1to8}, %ymm30
+
+// CHECK: vgetexpps -516(%rdx){1to8}, %ymm30
+// CHECK: encoding: [0x62,0x62,0x7d,0x38,0x42,0xb2,0xfc,0xfd,0xff,0xff]
+ vgetexpps -516(%rdx){1to8}, %ymm30
+
+// CHECK: vshuff32x4 $171, %ymm18, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x23,0x25,0x20,0x23,0xea,0xab]
+ vshuff32x4 $0xab, %ymm18, %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $171, %ymm18, %ymm27, %ymm29 {%k7}
+// CHECK: encoding: [0x62,0x23,0x25,0x27,0x23,0xea,0xab]
+ vshuff32x4 $0xab, %ymm18, %ymm27, %ymm29 {%k7}
+
+// CHECK: vshuff32x4 $171, %ymm18, %ymm27, %ymm29 {%k7} {z}
+// CHECK: encoding: [0x62,0x23,0x25,0xa7,0x23,0xea,0xab]
+ vshuff32x4 $0xab, %ymm18, %ymm27, %ymm29 {%k7} {z}
+
+// CHECK: vshuff32x4 $123, %ymm18, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x23,0x25,0x20,0x23,0xea,0x7b]
+ vshuff32x4 $0x7b, %ymm18, %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, (%rcx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x20,0x23,0x29,0x7b]
+ vshuff32x4 $0x7b, (%rcx), %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, 291(%rax,%r14,8), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x23,0x25,0x20,0x23,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshuff32x4 $0x7b, 291(%rax,%r14,8), %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, (%rcx){1to8}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x30,0x23,0x29,0x7b]
+ vshuff32x4 $0x7b, (%rcx){1to8}, %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, 4064(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x20,0x23,0x6a,0x7f,0x7b]
+ vshuff32x4 $0x7b, 4064(%rdx), %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, 4096(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x20,0x23,0xaa,0x00,0x10,0x00,0x00,0x7b]
+ vshuff32x4 $0x7b, 4096(%rdx), %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, -4096(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x20,0x23,0x6a,0x80,0x7b]
+ vshuff32x4 $0x7b, -4096(%rdx), %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, -4128(%rdx), %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x20,0x23,0xaa,0xe0,0xef,0xff,0xff,0x7b]
+ vshuff32x4 $0x7b, -4128(%rdx), %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, 508(%rdx){1to8}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x30,0x23,0x6a,0x7f,0x7b]
+ vshuff32x4 $0x7b, 508(%rdx){1to8}, %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, 512(%rdx){1to8}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x30,0x23,0xaa,0x00,0x02,0x00,0x00,0x7b]
+ vshuff32x4 $0x7b, 512(%rdx){1to8}, %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, -512(%rdx){1to8}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x30,0x23,0x6a,0x80,0x7b]
+ vshuff32x4 $0x7b, -512(%rdx){1to8}, %ymm27, %ymm29
+
+// CHECK: vshuff32x4 $123, -516(%rdx){1to8}, %ymm27, %ymm29
+// CHECK: encoding: [0x62,0x63,0x25,0x30,0x23,0xaa,0xfc,0xfd,0xff,0xff,0x7b]
+ vshuff32x4 $0x7b, -516(%rdx){1to8}, %ymm27, %ymm29
+
+// CHECK: vshuff64x2 $171, %ymm20, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xa3,0xed,0x20,0x23,0xd4,0xab]
+ vshuff64x2 $0xab, %ymm20, %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $171, %ymm20, %ymm18, %ymm18 {%k5}
+// CHECK: encoding: [0x62,0xa3,0xed,0x25,0x23,0xd4,0xab]
+ vshuff64x2 $0xab, %ymm20, %ymm18, %ymm18 {%k5}
+
+// CHECK: vshuff64x2 $171, %ymm20, %ymm18, %ymm18 {%k5} {z}
+// CHECK: encoding: [0x62,0xa3,0xed,0xa5,0x23,0xd4,0xab]
+ vshuff64x2 $0xab, %ymm20, %ymm18, %ymm18 {%k5} {z}
+
+// CHECK: vshuff64x2 $123, %ymm20, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xa3,0xed,0x20,0x23,0xd4,0x7b]
+ vshuff64x2 $0x7b, %ymm20, %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, (%rcx), %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x20,0x23,0x11,0x7b]
+ vshuff64x2 $0x7b, (%rcx), %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, 291(%rax,%r14,8), %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xa3,0xed,0x20,0x23,0x94,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshuff64x2 $0x7b, 291(%rax,%r14,8), %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, (%rcx){1to4}, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x30,0x23,0x11,0x7b]
+ vshuff64x2 $0x7b, (%rcx){1to4}, %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, 4064(%rdx), %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x20,0x23,0x52,0x7f,0x7b]
+ vshuff64x2 $0x7b, 4064(%rdx), %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, 4096(%rdx), %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x20,0x23,0x92,0x00,0x10,0x00,0x00,0x7b]
+ vshuff64x2 $0x7b, 4096(%rdx), %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, -4096(%rdx), %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x20,0x23,0x52,0x80,0x7b]
+ vshuff64x2 $0x7b, -4096(%rdx), %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, -4128(%rdx), %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x20,0x23,0x92,0xe0,0xef,0xff,0xff,0x7b]
+ vshuff64x2 $0x7b, -4128(%rdx), %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, 1016(%rdx){1to4}, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x30,0x23,0x52,0x7f,0x7b]
+ vshuff64x2 $0x7b, 1016(%rdx){1to4}, %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, 1024(%rdx){1to4}, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x30,0x23,0x92,0x00,0x04,0x00,0x00,0x7b]
+ vshuff64x2 $0x7b, 1024(%rdx){1to4}, %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, -1024(%rdx){1to4}, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x30,0x23,0x52,0x80,0x7b]
+ vshuff64x2 $0x7b, -1024(%rdx){1to4}, %ymm18, %ymm18
+
+// CHECK: vshuff64x2 $123, -1032(%rdx){1to4}, %ymm18, %ymm18
+// CHECK: encoding: [0x62,0xe3,0xed,0x30,0x23,0x92,0xf8,0xfb,0xff,0xff,0x7b]
+ vshuff64x2 $0x7b, -1032(%rdx){1to4}, %ymm18, %ymm18
+
+// CHECK: vshufi32x4 $171, %ymm17, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xa3,0x25,0x20,0x43,0xd1,0xab]
+ vshufi32x4 $0xab, %ymm17, %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $171, %ymm17, %ymm27, %ymm18 {%k7}
+// CHECK: encoding: [0x62,0xa3,0x25,0x27,0x43,0xd1,0xab]
+ vshufi32x4 $0xab, %ymm17, %ymm27, %ymm18 {%k7}
+
+// CHECK: vshufi32x4 $171, %ymm17, %ymm27, %ymm18 {%k7} {z}
+// CHECK: encoding: [0x62,0xa3,0x25,0xa7,0x43,0xd1,0xab]
+ vshufi32x4 $0xab, %ymm17, %ymm27, %ymm18 {%k7} {z}
+
+// CHECK: vshufi32x4 $123, %ymm17, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xa3,0x25,0x20,0x43,0xd1,0x7b]
+ vshufi32x4 $0x7b, %ymm17, %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, (%rcx), %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x20,0x43,0x11,0x7b]
+ vshufi32x4 $0x7b, (%rcx), %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, 291(%rax,%r14,8), %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xa3,0x25,0x20,0x43,0x94,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshufi32x4 $0x7b, 291(%rax,%r14,8), %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, (%rcx){1to8}, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x30,0x43,0x11,0x7b]
+ vshufi32x4 $0x7b, (%rcx){1to8}, %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, 4064(%rdx), %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x20,0x43,0x52,0x7f,0x7b]
+ vshufi32x4 $0x7b, 4064(%rdx), %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, 4096(%rdx), %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x20,0x43,0x92,0x00,0x10,0x00,0x00,0x7b]
+ vshufi32x4 $0x7b, 4096(%rdx), %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, -4096(%rdx), %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x20,0x43,0x52,0x80,0x7b]
+ vshufi32x4 $0x7b, -4096(%rdx), %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, -4128(%rdx), %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x20,0x43,0x92,0xe0,0xef,0xff,0xff,0x7b]
+ vshufi32x4 $0x7b, -4128(%rdx), %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, 508(%rdx){1to8}, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x30,0x43,0x52,0x7f,0x7b]
+ vshufi32x4 $0x7b, 508(%rdx){1to8}, %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, 512(%rdx){1to8}, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x30,0x43,0x92,0x00,0x02,0x00,0x00,0x7b]
+ vshufi32x4 $0x7b, 512(%rdx){1to8}, %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, -512(%rdx){1to8}, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x30,0x43,0x52,0x80,0x7b]
+ vshufi32x4 $0x7b, -512(%rdx){1to8}, %ymm27, %ymm18
+
+// CHECK: vshufi32x4 $123, -516(%rdx){1to8}, %ymm27, %ymm18
+// CHECK: encoding: [0x62,0xe3,0x25,0x30,0x43,0x92,0xfc,0xfd,0xff,0xff,0x7b]
+ vshufi32x4 $0x7b, -516(%rdx){1to8}, %ymm27, %ymm18
+
+// CHECK: vshufi64x2 $171, %ymm21, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x23,0xad,0x20,0x43,0xcd,0xab]
+ vshufi64x2 $0xab, %ymm21, %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $171, %ymm21, %ymm26, %ymm25 {%k3}
+// CHECK: encoding: [0x62,0x23,0xad,0x23,0x43,0xcd,0xab]
+ vshufi64x2 $0xab, %ymm21, %ymm26, %ymm25 {%k3}
+
+// CHECK: vshufi64x2 $171, %ymm21, %ymm26, %ymm25 {%k3} {z}
+// CHECK: encoding: [0x62,0x23,0xad,0xa3,0x43,0xcd,0xab]
+ vshufi64x2 $0xab, %ymm21, %ymm26, %ymm25 {%k3} {z}
+
+// CHECK: vshufi64x2 $123, %ymm21, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x23,0xad,0x20,0x43,0xcd,0x7b]
+ vshufi64x2 $0x7b, %ymm21, %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, (%rcx), %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x20,0x43,0x09,0x7b]
+ vshufi64x2 $0x7b, (%rcx), %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, 291(%rax,%r14,8), %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x23,0xad,0x20,0x43,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vshufi64x2 $0x7b, 291(%rax,%r14,8), %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, (%rcx){1to4}, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x30,0x43,0x09,0x7b]
+ vshufi64x2 $0x7b, (%rcx){1to4}, %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, 4064(%rdx), %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x20,0x43,0x4a,0x7f,0x7b]
+ vshufi64x2 $0x7b, 4064(%rdx), %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, 4096(%rdx), %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x20,0x43,0x8a,0x00,0x10,0x00,0x00,0x7b]
+ vshufi64x2 $0x7b, 4096(%rdx), %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, -4096(%rdx), %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x20,0x43,0x4a,0x80,0x7b]
+ vshufi64x2 $0x7b, -4096(%rdx), %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, -4128(%rdx), %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x20,0x43,0x8a,0xe0,0xef,0xff,0xff,0x7b]
+ vshufi64x2 $0x7b, -4128(%rdx), %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, 1016(%rdx){1to4}, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x30,0x43,0x4a,0x7f,0x7b]
+ vshufi64x2 $0x7b, 1016(%rdx){1to4}, %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, 1024(%rdx){1to4}, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x30,0x43,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vshufi64x2 $0x7b, 1024(%rdx){1to4}, %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, -1024(%rdx){1to4}, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x30,0x43,0x4a,0x80,0x7b]
+ vshufi64x2 $0x7b, -1024(%rdx){1to4}, %ymm26, %ymm25
+
+// CHECK: vshufi64x2 $123, -1032(%rdx){1to4}, %ymm26, %ymm25
+// CHECK: encoding: [0x62,0x63,0xad,0x30,0x43,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vshufi64x2 $0x7b, -1032(%rdx){1to4}, %ymm26, %ymm25
+
+// CHECK: valignq $171, %xmm24, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0x83,0xed,0x00,0x03,0xd8,0xab]
+ valignq $0xab, %xmm24, %xmm18, %xmm19
+
+// CHECK: valignq $171, %xmm24, %xmm18, %xmm19 {%k5}
+// CHECK: encoding: [0x62,0x83,0xed,0x05,0x03,0xd8,0xab]
+ valignq $0xab, %xmm24, %xmm18, %xmm19 {%k5}
+
+// CHECK: valignq $171, %xmm24, %xmm18, %xmm19 {%k5} {z}
+// CHECK: encoding: [0x62,0x83,0xed,0x85,0x03,0xd8,0xab]
+ valignq $0xab, %xmm24, %xmm18, %xmm19 {%k5} {z}
+
+// CHECK: valignq $123, %xmm24, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0x83,0xed,0x00,0x03,0xd8,0x7b]
+ valignq $0x7b, %xmm24, %xmm18, %xmm19
+
+// CHECK: valignq $123, (%rcx), %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x00,0x03,0x19,0x7b]
+ valignq $0x7b, (%rcx), %xmm18, %xmm19
+
+// CHECK: valignq $123, 291(%rax,%r14,8), %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xa3,0xed,0x00,0x03,0x9c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ valignq $0x7b, 291(%rax,%r14,8), %xmm18, %xmm19
+
+// CHECK: valignq $123, (%rcx){1to2}, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x10,0x03,0x19,0x7b]
+ valignq $0x7b, (%rcx){1to2}, %xmm18, %xmm19
+
+// CHECK: valignq $123, 2032(%rdx), %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x00,0x03,0x5a,0x7f,0x7b]
+ valignq $0x7b, 2032(%rdx), %xmm18, %xmm19
+
+// CHECK: valignq $123, 2048(%rdx), %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x00,0x03,0x9a,0x00,0x08,0x00,0x00,0x7b]
+ valignq $0x7b, 2048(%rdx), %xmm18, %xmm19
+
+// CHECK: valignq $123, -2048(%rdx), %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x00,0x03,0x5a,0x80,0x7b]
+ valignq $0x7b, -2048(%rdx), %xmm18, %xmm19
+
+// CHECK: valignq $123, -2064(%rdx), %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x00,0x03,0x9a,0xf0,0xf7,0xff,0xff,0x7b]
+ valignq $0x7b, -2064(%rdx), %xmm18, %xmm19
+
+// CHECK: valignq $123, 1016(%rdx){1to2}, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x10,0x03,0x5a,0x7f,0x7b]
+ valignq $0x7b, 1016(%rdx){1to2}, %xmm18, %xmm19
+
+// CHECK: valignq $123, 1024(%rdx){1to2}, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x10,0x03,0x9a,0x00,0x04,0x00,0x00,0x7b]
+ valignq $0x7b, 1024(%rdx){1to2}, %xmm18, %xmm19
+
+// CHECK: valignq $123, -1024(%rdx){1to2}, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x10,0x03,0x5a,0x80,0x7b]
+ valignq $0x7b, -1024(%rdx){1to2}, %xmm18, %xmm19
+
+// CHECK: valignq $123, -1032(%rdx){1to2}, %xmm18, %xmm19
+// CHECK: encoding: [0x62,0xe3,0xed,0x10,0x03,0x9a,0xf8,0xfb,0xff,0xff,0x7b]
+ valignq $0x7b, -1032(%rdx){1to2}, %xmm18, %xmm19
+
+// CHECK: valignq $171, %ymm26, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x03,0xbd,0x20,0x03,0xca,0xab]
+ valignq $0xab, %ymm26, %ymm24, %ymm25
+
+// CHECK: valignq $171, %ymm26, %ymm24, %ymm25 {%k2}
+// CHECK: encoding: [0x62,0x03,0xbd,0x22,0x03,0xca,0xab]
+ valignq $0xab, %ymm26, %ymm24, %ymm25 {%k2}
+
+// CHECK: valignq $171, %ymm26, %ymm24, %ymm25 {%k2} {z}
+// CHECK: encoding: [0x62,0x03,0xbd,0xa2,0x03,0xca,0xab]
+ valignq $0xab, %ymm26, %ymm24, %ymm25 {%k2} {z}
+
+// CHECK: valignq $123, %ymm26, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x03,0xbd,0x20,0x03,0xca,0x7b]
+ valignq $0x7b, %ymm26, %ymm24, %ymm25
+
+// CHECK: valignq $123, (%rcx), %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x20,0x03,0x09,0x7b]
+ valignq $0x7b, (%rcx), %ymm24, %ymm25
+
+// CHECK: valignq $123, 291(%rax,%r14,8), %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x23,0xbd,0x20,0x03,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ valignq $0x7b, 291(%rax,%r14,8), %ymm24, %ymm25
+
+// CHECK: valignq $123, (%rcx){1to4}, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x30,0x03,0x09,0x7b]
+ valignq $0x7b, (%rcx){1to4}, %ymm24, %ymm25
+
+// CHECK: valignq $123, 4064(%rdx), %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x20,0x03,0x4a,0x7f,0x7b]
+ valignq $0x7b, 4064(%rdx), %ymm24, %ymm25
+
+// CHECK: valignq $123, 4096(%rdx), %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x20,0x03,0x8a,0x00,0x10,0x00,0x00,0x7b]
+ valignq $0x7b, 4096(%rdx), %ymm24, %ymm25
+
+// CHECK: valignq $123, -4096(%rdx), %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x20,0x03,0x4a,0x80,0x7b]
+ valignq $0x7b, -4096(%rdx), %ymm24, %ymm25
+
+// CHECK: valignq $123, -4128(%rdx), %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x20,0x03,0x8a,0xe0,0xef,0xff,0xff,0x7b]
+ valignq $0x7b, -4128(%rdx), %ymm24, %ymm25
+
+// CHECK: valignq $123, 1016(%rdx){1to4}, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x30,0x03,0x4a,0x7f,0x7b]
+ valignq $0x7b, 1016(%rdx){1to4}, %ymm24, %ymm25
+
+// CHECK: valignq $123, 1024(%rdx){1to4}, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x30,0x03,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ valignq $0x7b, 1024(%rdx){1to4}, %ymm24, %ymm25
+
+// CHECK: valignq $123, -1024(%rdx){1to4}, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x30,0x03,0x4a,0x80,0x7b]
+ valignq $0x7b, -1024(%rdx){1to4}, %ymm24, %ymm25
+
+// CHECK: valignq $123, -1032(%rdx){1to4}, %ymm24, %ymm25
+// CHECK: encoding: [0x62,0x63,0xbd,0x30,0x03,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ valignq $0x7b, -1032(%rdx){1to4}, %ymm24, %ymm25
diff --git a/test/Object/Inputs/macho-invalid-header b/test/Object/Inputs/macho-invalid-header
new file mode 100644
index 0000000000000..da52d43a03a49
--- /dev/null
+++ b/test/Object/Inputs/macho-invalid-header
Binary files differ
diff --git a/test/Object/Inputs/macho64-invalid-incomplete-segment-load-command b/test/Object/Inputs/macho64-invalid-incomplete-segment-load-command
new file mode 100644
index 0000000000000..82ec7246bdb7f
--- /dev/null
+++ b/test/Object/Inputs/macho64-invalid-incomplete-segment-load-command
Binary files differ
diff --git a/test/Object/Inputs/no-start-symbol.elf-x86_64 b/test/Object/Inputs/no-start-symbol.elf-x86_64
new file mode 100644
index 0000000000000..b2c9edc300d03
--- /dev/null
+++ b/test/Object/Inputs/no-start-symbol.elf-x86_64
Binary files differ
diff --git a/test/Object/X86/no-start-symbol.test b/test/Object/X86/no-start-symbol.test
new file mode 100644
index 0000000000000..b468894c7b6bb
--- /dev/null
+++ b/test/Object/X86/no-start-symbol.test
@@ -0,0 +1,9 @@
+RUN: llvm-objdump -d %p/../Inputs/no-start-symbol.elf-x86_64 | FileCheck %s
+
+Test that we disassemble the start of the section.
+
+CHECK: Disassembly of section .text:
+CHECK-NEXT: .text:
+CHECK-NEXT: 0: 90 nop
+CHECK: foo:
+CHECK-NEXT: 1: 90 nop
diff --git a/test/Object/macho-invalid.test b/test/Object/macho-invalid.test
index e2c9b6b7360e0..f4aa1e0c2984b 100644
--- a/test/Object/macho-invalid.test
+++ b/test/Object/macho-invalid.test
@@ -3,36 +3,40 @@ RUN: llvm-objdump -private-headers %p/Inputs/macho-invalid-zero-ncmds
RUN: not llvm-objdump -private-headers %p/Inputs/macho64-invalid-incomplete-load-command 2>&1 \
RUN: | FileCheck -check-prefix INCOMPLETE-LOADC %s
+INCOMPLETE-LOADC: Invalid data was encountered while parsing the file.
RUN: not llvm-objdump -private-headers %p/Inputs/macho-invalid-too-small-load-command 2>&1 \
RUN: | FileCheck -check-prefix SMALL-LOADC-SIZE %s
RUN: not llvm-objdump -private-headers %p/Inputs/macho64-invalid-too-small-load-command 2>&1 \
RUN: | FileCheck -check-prefix SMALL-LOADC-SIZE %s
+SMALL-LOADC-SIZE: Mach-O load command with size < 8 bytes
RUN: not llvm-objdump -private-headers %p/Inputs/macho-invalid-too-small-segment-load-command 2>&1 \
RUN: | FileCheck -check-prefix SMALL-SEGLOADC-SIZE %s
RUN: not llvm-objdump -private-headers %p/Inputs/macho64-invalid-too-small-segment-load-command 2>&1 \
RUN: | FileCheck -check-prefix SMALL-SEGLOADC-SIZE %s
+SMALL-SEGLOADC-SIZE: Mach-O segment load command size is too small
RUN: not llvm-objdump -private-headers %p/Inputs/macho-invalid-no-size-for-sections 2>&1 \
RUN: | FileCheck -check-prefix TOO-MANY-SECTS %s
RUN: not llvm-objdump -private-headers %p/Inputs/macho64-invalid-no-size-for-sections 2>&1 \
RUN: | FileCheck -check-prefix TOO-MANY-SECTS %s
+TOO-MANY-SECTS: Mach-O segment load command contains too many sections
RUN: not llvm-objdump -t %p/Inputs/macho-invalid-bad-symbol-index 2>&1 \
RUN: | FileCheck -check-prefix BAD-SYMBOL %s
+BAD-SYMBOL: Requested symbol index is out of range
RUN: not llvm-objdump -t %p/Inputs/macho-invalid-symbol-name-past-eof 2>&1 \
RUN: | FileCheck -check-prefix NAME-PAST-EOF %s
+NAME-PAST-EOF: Symbol name entry points before beginning or past end of file
RUN: not llvm-nm %p/Inputs/macho-invalid-section-index-getSectionRawName 2>&1 \
RUN: | FileCheck -check-prefix INVALID-SECTION-IDX-SYMBOL-SEC %s
+INVALID-SECTION-IDX-SYMBOL-SEC: getSymbolSection: Invalid section index
-SMALL-LOADC-SIZE: Load command with size < 8 bytes
-SMALL-SEGLOADC-SIZE: Segment load command size is too small
-INCOMPLETE-LOADC: Malformed MachO file
-TOO-MANY-SECTS: Number of sections too large for size of load command
-BAD-SYMBOL: Requested symbol index is out of range
-NAME-PAST-EOF: Symbol name entry points before beginning or past end of file
+RUN: not llvm-objdump -private-headers %p/Inputs/macho-invalid-header 2>&1 | FileCheck -check-prefix INVALID-HEADER %s
+INVALID-HEADER: Invalid data was encountered while parsing the file
-INVALID-SECTION-IDX-SYMBOL-SEC: getSymbolSection: Invalid section index
+RUN: not llvm-objdump -private-headers %p/Inputs/macho64-invalid-incomplete-segment-load-command 2>&1 | FileCheck -check-prefix INCOMPLETE-SEGMENT-LOADC %s
+INCOMPLETE-SEGMENT-LOADC: Invalid data was encountered while parsing the file
diff --git a/test/Object/obj2yaml.test b/test/Object/obj2yaml.test
index 2a3f7c841dd12..08000f66581bd 100644
--- a/test/Object/obj2yaml.test
+++ b/test/Object/obj2yaml.test
@@ -267,26 +267,19 @@ ELF-MIPSEL-NEXT: - Name: '$.str'
ELF-MIPSEL-NEXT: Type: STT_OBJECT
ELF-MIPSEL-NEXT: Section: .rodata.str1.1
ELF-MIPSEL-NEXT: Size: 0x000000000000000D
-ELF-MIPSEL-NEXT: - Name: .text
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .text
-ELF-MIPSEL-NEXT: - Name: .data
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .data
-ELF-MIPSEL-NEXT: - Name: .bss
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .bss
-ELF-MIPSEL-NEXT: - Name: .mdebug.abi32
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .mdebug.abi32
-ELF-MIPSEL-NEXT: - Name: .rodata.str1.1
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .rodata.str1.1
-ELF-MIPSEL-NEXT: - Name: .reginfo
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .reginfo
-ELF-MIPSEL-NEXT: - Name: .MIPS.abiflags
-ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: - Type: STT_SECTION
ELF-MIPSEL-NEXT: Section: .MIPS.abiflags
ELF-MIPSEL-NEXT: Global:
ELF-MIPSEL-NEXT: - Name: main
@@ -343,22 +336,17 @@ ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000004
ELF-MIPS64EL-NEXT: Content: ''
ELF-MIPS64EL-NEXT: Symbols:
ELF-MIPS64EL-NEXT: Local:
-ELF-MIPS64EL-NEXT: - Name: .text
-ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: - Type: STT_SECTION
ELF-MIPS64EL-NEXT: Section: .text
-ELF-MIPS64EL-NEXT: - Name: .data
-ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: - Type: STT_SECTION
ELF-MIPS64EL-NEXT: Section: .data
-ELF-MIPS64EL-NEXT: - Name: .bss
-ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: - Type: STT_SECTION
ELF-MIPS64EL-NEXT: Section: .bss
ELF-MIPS64EL-NEXT: - Name: bar
ELF-MIPS64EL-NEXT: Section: .data
-ELF-MIPS64EL-NEXT: - Name: .MIPS.options
-ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: - Type: STT_SECTION
ELF-MIPS64EL-NEXT: Section: .MIPS.options
-ELF-MIPS64EL-NEXT: - Name: .pdr
-ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: - Type: STT_SECTION
ELF-MIPS64EL-NEXT: Section: .pdr
ELF-MIPS64EL-NEXT: Global:
ELF-MIPS64EL-NEXT: - Name: zed
@@ -394,7 +382,7 @@ ELF-X86-64-NEXT: AddressAlign: 0x0000000000000008
ELF-X86-64-NEXT: Info: .text
ELF-X86-64-NEXT: Relocations:
ELF-X86-64-NEXT: - Offset: 0x000000000000000D
-ELF-X86-64-NEXT: Symbol: .rodata.str1.1
+ELF-X86-64-NEXT: Symbol: ''
ELF-X86-64-NEXT: Type: R_X86_64_32S
ELF-X86-64-NEXT: - Offset: 0x0000000000000012
ELF-X86-64-NEXT: Symbol: puts
@@ -408,14 +396,11 @@ ELF-X86-64-NEXT: Symbols:
ELF-X86-64-NEXT: Local:
ELF-X86-64-NEXT: - Name: trivial-object-test.s
ELF-X86-64-NEXT: Type: STT_FILE
-ELF-X86-64-NEXT: - Name: .text
-ELF-X86-64-NEXT: Type: STT_SECTION
+ELF-X86-64-NEXT: - Type: STT_SECTION
ELF-X86-64-NEXT: Section: .text
-ELF-X86-64-NEXT: - Name: .rodata.str1.1
-ELF-X86-64-NEXT: Type: STT_SECTION
+ELF-X86-64-NEXT: - Type: STT_SECTION
ELF-X86-64-NEXT: Section: .rodata.str1.1
-ELF-X86-64-NEXT: - Name: .note.GNU-stack
-ELF-X86-64-NEXT: Type: STT_SECTION
+ELF-X86-64-NEXT: - Type: STT_SECTION
ELF-X86-64-NEXT: Section: .note.GNU-stack
ELF-X86-64-NEXT: Global:
ELF-X86-64-NEXT: - Name: main
diff --git a/test/Object/readobj-shared-object.test b/test/Object/readobj-shared-object.test
index 516d4c699e420..508caca9717dc 100644
--- a/test/Object/readobj-shared-object.test
+++ b/test/Object/readobj-shared-object.test
@@ -128,61 +128,61 @@ ELF: ]
ELF: Symbols [
ELF: Symbol {
-ELF: Name: .hash
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .hash
ELF: }
ELF: Symbol {
-ELF: Name: .dynsym
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .dynsym
ELF: }
ELF: Symbol {
-ELF: Name: .dynstr
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .dynstr
ELF: }
ELF: Symbol {
-ELF: Name: .text
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .text
ELF: }
ELF: Symbol {
-ELF: Name: .eh_frame
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .eh_frame
ELF: }
ELF: Symbol {
-ELF: Name: .tdata
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .tdata
ELF: }
ELF: Symbol {
-ELF: Name: .dynamic
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .dynamic
ELF: }
ELF: Symbol {
-ELF: Name: .got.plt
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .got.plt
ELF: }
ELF: Symbol {
-ELF: Name: .data
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .data
ELF: }
ELF: Symbol {
-ELF: Name: .bss
+ELF: Name: (0)
ELF: Binding: Local
ELF: Type: Section
ELF: Section: .bss
diff --git a/test/Transforms/CorrelatedValuePropagation/select.ll b/test/Transforms/CorrelatedValuePropagation/select.ll
index 5501438f690f5..d88e3e462a205 100644
--- a/test/Transforms/CorrelatedValuePropagation/select.ll
+++ b/test/Transforms/CorrelatedValuePropagation/select.ll
@@ -51,3 +51,25 @@ else:
ret i8 %b
}
+@c = global i32 0, align 4
+@b = global i32 0, align 4
+
+; CHECK-LABEL: @PR23752(
+define i32 @PR23752() {
+entry:
+ br label %for.body
+
+for.body:
+ %phi = phi i32 [ 0, %entry ], [ %sel, %for.body ]
+ %sel = select i1 icmp sgt (i32* @b, i32* @c), i32 %phi, i32 1
+ %cmp = icmp ne i32 %sel, 1
+ br i1 %cmp, label %for.body, label %if.end
+
+; CHECK: %[[sel:.*]] = select i1 icmp sgt (i32* @b, i32* @c), i32 0, i32 1
+; CHECK-NEXT: %[[cmp:.*]] = icmp ne i32 %[[sel]], 1
+; CHECK-NEXT: br i1 %[[cmp]]
+
+if.end:
+ ret i32 %sel
+; CHECK: ret i32 %[[sel]]
+}
diff --git a/test/Transforms/GVN/unreachable_block_infinite_loop.ll b/test/Transforms/GVN/unreachable_block_infinite_loop.ll
index fca5a28b38dd6..a47e9e4c3a044 100644
--- a/test/Transforms/GVN/unreachable_block_infinite_loop.ll
+++ b/test/Transforms/GVN/unreachable_block_infinite_loop.ll
@@ -12,3 +12,32 @@ unreachable_block:
ret i32 %a
}
+define i32 @pr23096_test0() {
+entry:
+ br label %bb0
+
+bb1:
+ %ptr1 = ptrtoint i32* %ptr2 to i64
+ %ptr2 = inttoptr i64 %ptr1 to i32*
+ br i1 undef, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32* [ undef, %entry ], [ %ptr2, %bb1 ]
+ %load = load i32, i32* %phi
+ ret i32 %load
+}
+
+define i32 @pr23096_test1() {
+entry:
+ br label %bb0
+
+bb1:
+ %ptr1 = getelementptr i32, i32* %ptr2, i32 0
+ %ptr2 = getelementptr i32, i32* %ptr1, i32 0
+ br i1 undef, label %bb0, label %bb1
+
+bb0:
+ %phi = phi i32* [ undef, %entry ], [ %ptr2, %bb1 ]
+ %load = load i32, i32* %phi
+ ret i32 %load
+}
diff --git a/test/Transforms/IndVarSimplify/exit_value_test2.ll b/test/Transforms/IndVarSimplify/exit_value_test2.ll
new file mode 100644
index 0000000000000..24e3e95a89182
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/exit_value_test2.ll
@@ -0,0 +1,52 @@
+; PR23538
+; RUN: opt < %s -indvars -loop-deletion -S | FileCheck %s
+
+; Check that IndVarSimplify does not replace the exit value, since otherwise
+; a udiv would be introduced by SCEV expansion and the cost would be high.
+;
+; CHECK-LABEL: @_Z3fooPKcjj(
+; CHECK-NOT: udiv
+
+declare void @_Z3mixRjj(i32* dereferenceable(4), i32)
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.end(i64, i8* nocapture)
+
+define i32 @_Z3fooPKcjj(i8* nocapture readonly %s, i32 %len, i32 %c) {
+entry:
+ %a = alloca i32, align 4
+ %tmp = bitcast i32* %a to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %tmp)
+ store i32 -1640531527, i32* %a, align 4
+ %cmp8 = icmp ugt i32 %len, 11
+ br i1 %cmp8, label %while.body.lr.ph, label %while.end
+
+while.body.lr.ph: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body, %while.body.lr.ph
+ %keylen.010 = phi i32 [ %len, %while.body.lr.ph ], [ %sub, %while.body ]
+ %s.addr.09 = phi i8* [ %s, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+ %tmp1 = bitcast i8* %s.addr.09 to i32*
+ %tmp2 = load i32, i32* %tmp1, align 4
+ %shl.i = shl i32 %tmp2, 1
+ %and.i = and i32 %shl.i, 16843008
+ %tmp3 = load i32, i32* %a, align 4
+ %sub.i = add i32 %tmp3, %tmp2
+ %add = sub i32 %sub.i, %and.i
+ store i32 %add, i32* %a, align 4
+ %add.ptr = getelementptr inbounds i8, i8* %s.addr.09, i64 12
+ %sub = add i32 %keylen.010, -12
+ %cmp = icmp ugt i32 %sub, 11
+ br i1 %cmp, label %while.body, label %while.cond.while.end_crit_edge
+
+while.cond.while.end_crit_edge: ; preds = %while.body
+ %sub.lcssa = phi i32 [ %sub, %while.body ]
+ br label %while.end
+
+while.end: ; preds = %while.cond.while.end_crit_edge, %entry
+ %keylen.0.lcssa = phi i32 [ %sub.lcssa, %while.cond.while.end_crit_edge ], [ %len, %entry ]
+ call void @_Z3mixRjj(i32* dereferenceable(4) %a, i32 %keylen.0.lcssa)
+ %tmp4 = load i32, i32* %a, align 4
+ call void @llvm.lifetime.end(i64 4, i8* %tmp)
+ ret i32 %tmp4
+}
diff --git a/test/Transforms/IndVarSimplify/exit_value_test3.ll b/test/Transforms/IndVarSimplify/exit_value_test3.ll
new file mode 100644
index 0000000000000..2051d2a46b20a
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/exit_value_test3.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -indvars -loop-deletion -S |FileCheck %s
+
+; Check that IndVarSimplify replaces the exit value even though the expansion
+; cost is high, because the loop can be deleted after the exit value rewrite.
+;
+; CHECK-LABEL: @_Z3fooPKcjj(
+; CHECK: udiv
+; CHECK: [[LABEL:^[a-zA-Z0-9_.]+]]:
+; CHECK-NOT: br {{.*}} [[LABEL]]
+
+define i32 @_Z3fooPKcjj(i8* nocapture readnone %s, i32 %len, i32 %c) #0 {
+entry:
+ br label %while.cond
+
+while.cond: ; preds = %while.cond, %entry
+ %klen.0 = phi i32 [ %len, %entry ], [ %sub, %while.cond ]
+ %cmp = icmp ugt i32 %klen.0, 11
+ %sub = add i32 %klen.0, -12
+ br i1 %cmp, label %while.cond, label %while.end
+
+while.end: ; preds = %while.cond
+ %klen.0.lcssa = phi i32 [ %klen.0, %while.cond ]
+ ret i32 %klen.0.lcssa
+}
diff --git a/test/Transforms/IndVarSimplify/lcssa-preservation.ll b/test/Transforms/IndVarSimplify/lcssa-preservation.ll
index f69c96ce02105..5d502f3b6eafc 100644
--- a/test/Transforms/IndVarSimplify/lcssa-preservation.ll
+++ b/test/Transforms/IndVarSimplify/lcssa-preservation.ll
@@ -1,5 +1,4 @@
-; RUN: opt < %s -indvars -S | FileCheck %s
-;
+; RUN: opt < %s -indvars -replexitval=always -S | FileCheck %s
; Make sure IndVars preserves LCSSA form, especially across loop nests.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/test/Transforms/InstCombine/fpcast.ll b/test/Transforms/InstCombine/fpcast.ll
index 8319624b87c9d..93a64e6b49bd4 100644
--- a/test/Transforms/InstCombine/fpcast.ll
+++ b/test/Transforms/InstCombine/fpcast.ll
@@ -85,3 +85,11 @@ define float @test8(float %V) {
; CHECK-NEXT: %[[trunc:.*]] = fptrunc double %frem to float
; CHECK-NEXT: ret float %trunc
}
+
+; CHECK-LABEL: @test_fptrunc_fptrunc
+; CHECK-NOT: fptrunc double {{.*}} to half
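+; (Presumably the intermediate float rounding step cannot be proven redundant
+; here, so the pair of fptruncs must stay separate.)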
+define half @test_fptrunc_fptrunc(double %V) {
+ %t1 = fptrunc double %V to float
+ %t2 = fptrunc float %t1 to half
+ ret half %t2
+}
diff --git a/test/Transforms/InstCombine/load-bitcast32.ll b/test/Transforms/InstCombine/load-bitcast32.ll
new file mode 100644
index 0000000000000..b1c78a8a314eb
--- /dev/null
+++ b/test/Transforms/InstCombine/load-bitcast32.ll
@@ -0,0 +1,79 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+target datalayout = "p:32:32:32"
+
+
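+; With 32-bit pointers, a load used only by inttoptr/ptrtoint is rewritten to
+; load the destination type directly when the loaded width matches the 32-bit
+; pointer width; a pointer load feeding a ptrtoint to a wider i64 is expected
+; to become an i32 load plus zext (see the CHECK lines below).
+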
+define i64* @test1(i8* %x) {
+entry:
+; CHECK-LABEL: @test1(
+; CHECK: load i64, i64*
+; CHECK: ret
+ %a = bitcast i8* %x to i64*
+ %b = load i64, i64* %a
+ %c = inttoptr i64 %b to i64*
+
+ ret i64* %c
+}
+
+define i32* @test2(i8* %x) {
+entry:
+; CHECK-LABEL: @test2(
+; CHECK: load i32*, i32**
+; CHECK: ret
+ %a = bitcast i8* %x to i32*
+ %b = load i32, i32* %a
+ %c = inttoptr i32 %b to i32*
+
+ ret i32* %c
+}
+
+define i64* @test3(i8* %x) {
+entry:
+; CHECK-LABEL: @test3(
+; CHECK: load i64*, i64**
+; CHECK: ret
+ %a = bitcast i8* %x to i32*
+ %b = load i32, i32* %a
+ %c = inttoptr i32 %b to i64*
+
+ ret i64* %c
+}
+
+define i64 @test4(i8* %x) {
+entry:
+; CHECK-LABEL: @test4(
+; CHECK: load i32, i32*
+; CHECK: zext
+; CHECK: ret
+ %a = bitcast i8* %x to i64**
+ %b = load i64*, i64** %a
+ %c = ptrtoint i64* %b to i64
+
+ ret i64 %c
+}
+
+define i32 @test5(i8* %x) {
+entry:
+; CHECK-LABEL: @test5(
+; CHECK: load i32, i32*
+; CHECK: ret
+ %a = bitcast i8* %x to i32**
+ %b = load i32*, i32** %a
+ %c = ptrtoint i32* %b to i32
+
+ ret i32 %c
+}
+
+define i64 @test6(i8* %x) {
+entry:
+; CHECK-LABEL: @test6(
+; CHECK: load i32, i32*
+; CHECK: zext
+; CHECK: ret
+ %a = bitcast i8* %x to i32**
+ %b = load i32*, i32** %a
+ %c = ptrtoint i32* %b to i64
+
+ ret i64 %c
+}
+
diff --git a/test/Transforms/InstCombine/load-bitcast64.ll b/test/Transforms/InstCombine/load-bitcast64.ll
new file mode 100644
index 0000000000000..d14c686d83ea3
--- /dev/null
+++ b/test/Transforms/InstCombine/load-bitcast64.ll
@@ -0,0 +1,78 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+target datalayout = "p:64:64:64"
+
+
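+; Same checks as load-bitcast32.ll, but with 64-bit pointers: the direct-load
+; fold now applies to the i64 cases, and a pointer load feeding a ptrtoint to
+; a narrower i32 is expected to become an i64 load plus trunc.
+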
+define i64* @test1(i8* %x) {
+entry:
+; CHECK-LABEL: @test1(
+; CHECK: load i64*, i64**
+; CHECK: ret
+ %a = bitcast i8* %x to i64*
+ %b = load i64, i64* %a
+ %c = inttoptr i64 %b to i64*
+
+ ret i64* %c
+}
+
+define i32* @test2(i8* %x) {
+entry:
+; CHECK-LABEL: @test2(
+; CHECK: load i32, i32*
+; CHECK: ret
+ %a = bitcast i8* %x to i32*
+ %b = load i32, i32* %a
+ %c = inttoptr i32 %b to i32*
+
+ ret i32* %c
+}
+
+define i64* @test3(i8* %x) {
+entry:
+; CHECK-LABEL: @test3(
+; CHECK: load i32, i32*
+; CHECK: ret
+ %a = bitcast i8* %x to i32*
+ %b = load i32, i32* %a
+ %c = inttoptr i32 %b to i64*
+
+ ret i64* %c
+}
+
+define i64 @test4(i8* %x) {
+entry:
+; CHECK-LABEL: @test4(
+; CHECK: load i64, i64*
+; CHECK: ret
+ %a = bitcast i8* %x to i64**
+ %b = load i64*, i64** %a
+ %c = ptrtoint i64* %b to i64
+
+ ret i64 %c
+}
+
+define i32 @test5(i8* %x) {
+entry:
+; CHECK-LABEL: @test5(
+; CHECK: load i64, i64*
+; CHECK: trunc
+; CHECK: ret
+ %a = bitcast i8* %x to i32**
+ %b = load i32*, i32** %a
+ %c = ptrtoint i32* %b to i32
+
+ ret i32 %c
+}
+
+define i64 @test6(i8* %x) {
+entry:
+; CHECK-LABEL: @test6(
+; CHECK: load i64, i64*
+; CHECK: ret
+ %a = bitcast i8* %x to i32**
+ %b = load i32*, i32** %a
+ %c = ptrtoint i32* %b to i64
+
+ ret i64 %c
+}
+
diff --git a/test/Transforms/InstCombine/pr23751.ll b/test/Transforms/InstCombine/pr23751.ll
new file mode 100644
index 0000000000000..d7840be2f8374
--- /dev/null
+++ b/test/Transforms/InstCombine/pr23751.ll
@@ -0,0 +1,13 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+@d = common global i32 0, align 4
+
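+; After the 'or' with -2, %3 is either -2 or -1, so the add below can wrap and
+; make %4 unsigned-less-than %1; folding the compare to false would be a
+; miscompile.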
+define i1 @f(i8 zeroext %p) #1 {
+; CHECK-NOT: ret i1 false
+ %1 = zext i8 %p to i32
+ %2 = load i32, i32* @d, align 4
+ %3 = or i32 %2, -2
+ %4 = add nsw i32 %3, %1
+ %5 = icmp ugt i32 %1, %4
+ ret i1 %5
+}
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index e4bc96cff1764..27e487b4815e5 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -1532,3 +1532,16 @@ define i32 @test_max_of_min(i32 %a) {
%s1 = select i1 %c1, i32 %s0, i32 -1
ret i32 %s1
}
+
+
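+; When %x is INT_MAX the select returns INT_MIN without ever using the result
+; of the nsw add, so the select must not be collapsed into the add: that would
+; turn a well-defined result into poison.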
+define i32 @PR23757(i32 %x) {
+; CHECK-LABEL: @PR23757
+; CHECK: %[[cmp:.*]] = icmp eq i32 %x, 2147483647
+; CHECK-NEXT: %[[add:.*]] = add nsw i32 %x, 1
+; CHECK-NEXT: %[[sel:.*]] = select i1 %[[cmp]], i32 -2147483648, i32 %[[add]]
+; CHECK-NEXT: ret i32 %[[sel]]
+ %cmp = icmp eq i32 %x, 2147483647
+ %add = add nsw i32 %x, 1
+ %sel = select i1 %cmp, i32 -2147483648, i32 %add
+ ret i32 %sel
+}
diff --git a/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll b/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll
new file mode 100644
index 0000000000000..e5694fbeb0ce5
--- /dev/null
+++ b/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll
@@ -0,0 +1,58 @@
+; RUN: opt -S -loop-unroll < %s | FileCheck %s
+
+; LLVM should not try to fully unroll this loop.
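+; The trip count is about 2^28 and the body makes 15 opaque calls, so the
+; fully unrolled loop would be enormous with nothing to simplify away.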
+
+declare void @f()
+declare void @g()
+declare void @h()
+
+define void @trivial_loop() {
+; CHECK-LABEL: @trivial_loop(
+ entry:
+ br label %loop
+
+ loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.inc, %loop ]
+ %idx.inc = add i32 %idx, 1
+ call void @f()
+ call void @g()
+ call void @h()
+ call void @f()
+ call void @g()
+ call void @h()
+ call void @f()
+ call void @g()
+ call void @h()
+ call void @f()
+ call void @g()
+ call void @h()
+ call void @f()
+ call void @g()
+ call void @h()
+ %be = icmp slt i32 %idx, 268435456
+ br i1 %be, label %loop, label %exit
+
+; CHECK: loop:
+; CHECK-NEXT: %idx = phi i32 [ 0, %entry ], [ %idx.inc, %loop ]
+; CHECK-NEXT: %idx.inc = add i32 %idx, 1
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: call void @g()
+; CHECK-NEXT: call void @h()
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: call void @g()
+; CHECK-NEXT: call void @h()
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: call void @g()
+; CHECK-NEXT: call void @h()
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: call void @g()
+; CHECK-NEXT: call void @h()
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: call void @g()
+; CHECK-NEXT: call void @h()
+; CHECK-NEXT: %be = icmp slt i32 %idx, 268435456
+; CHECK-NEXT: br i1 %be, label %loop, label %exit
+
+ exit:
+ ret void
+}
diff --git a/test/Transforms/LoopUnroll/full-unroll-bad-geps.ll b/test/Transforms/LoopUnroll/full-unroll-bad-geps.ll
index 4c99bc73880ba..ac814526647e9 100644
--- a/test/Transforms/LoopUnroll/full-unroll-bad-geps.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-bad-geps.ll
@@ -1,5 +1,5 @@
; Check that we don't crash on corner cases.
-; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-absolute-threshold=10 -unroll-threshold=10 -unroll-percent-of-optimized-for-complete-unroll=20 -o /dev/null
+; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-percent-dynamic-cost-saved-threshold=20 -o /dev/null
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define void @foo1() {
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
index 2dab2fbf2e498..904a65a1bc0e3 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
@@ -1,8 +1,8 @@
; In this test we check how heuristics for complete unrolling work. We have
; three knobs:
; 1) -unroll-threshold
-; 2) -unroll-absolute-threshold and
-; 3) -unroll-percent-of-optimized-for-complete-unroll
+; 2) -unroll-percent-dynamic-cost-saved-threshold and
+; 3) -unroll-dynamic-cost-savings-discount
;
; They control loop-unrolling according to the following rules:
; * If size of the unrolled loop exceeds the absolute threshold, we don't unroll
@@ -17,10 +17,10 @@
; optimizations to remove ~55% of the instructions, the loop body size is 9,
; and unrolled size is 65.
-; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-absolute-threshold=10 -unroll-threshold=10 -unroll-percent-of-optimized-for-complete-unroll=20 | FileCheck %s -check-prefix=TEST1
-; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-absolute-threshold=100 -unroll-threshold=10 -unroll-percent-of-optimized-for-complete-unroll=20 | FileCheck %s -check-prefix=TEST2
-; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-absolute-threshold=100 -unroll-threshold=10 -unroll-percent-of-optimized-for-complete-unroll=80 | FileCheck %s -check-prefix=TEST3
-; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-absolute-threshold=100 -unroll-threshold=100 -unroll-percent-of-optimized-for-complete-unroll=80 | FileCheck %s -check-prefix=TEST4
+; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-percent-dynamic-cost-saved-threshold=20 -unroll-dynamic-cost-savings-discount=0 | FileCheck %s -check-prefix=TEST1
+; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-percent-dynamic-cost-saved-threshold=20 -unroll-dynamic-cost-savings-discount=90 | FileCheck %s -check-prefix=TEST2
+; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-percent-dynamic-cost-saved-threshold=80 -unroll-dynamic-cost-savings-discount=90 | FileCheck %s -check-prefix=TEST3
+; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=100 -unroll-percent-dynamic-cost-saved-threshold=80 -unroll-dynamic-cost-savings-discount=0 | FileCheck %s -check-prefix=TEST4
; If the absolute threshold is too low, or if we can't optimize away the
; requested percent of instructions, we shouldn't unroll:
diff --git a/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll b/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
index 4cd703f64582a..f16ee4171da92 100644
--- a/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
+++ b/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
@@ -1,5 +1,5 @@
-; RUN: opt -S < %s -loop-vectorize 2>&1 | FileCheck %s
-; RUN: opt -S < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 | FileCheck %s --check-prefix=FORCE-VEC
+; RUN: opt -S < %s -loop-vectorize -force-vector-interleave=2 -force-vector-width=4 -enable-interleaved-mem-accesses=true | FileCheck %s
+; RUN: opt -S < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -enable-interleaved-mem-accesses=true | FileCheck %s --check-prefix=FORCE-VEC
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnueabi"
@@ -102,26 +102,23 @@ for.end: ; preds = %for.body
; }
; CHECK-LABEL: @ptr_ind_plus2(
-; CHECK: load i32, i32*
-; CHECK: load i32, i32*
-; CHECK: load i32, i32*
-; CHECK: load i32, i32*
-; CHECK: mul nsw i32
-; CHECK: mul nsw i32
-; CHECK: add nsw i32
-; CHECK: add nsw i32
-; CHECK: %index.next = add i64 %index, 2
-; CHECK: %21 = icmp eq i64 %index.next, 1024
+; CHECK: %[[V0:.*]] = load <8 x i32>
+; CHECK: shufflevector <8 x i32> %[[V0]], <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK: shufflevector <8 x i32> %[[V0]], <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: %[[V1:.*]] = load <8 x i32>
+; CHECK: shufflevector <8 x i32> %[[V1]], <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK: shufflevector <8 x i32> %[[V1]], <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: mul nsw <4 x i32>
+; CHECK: mul nsw <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: %index.next = add i64 %index, 8
+; CHECK: icmp eq i64 %index.next, 1024
; FORCE-VEC-LABEL: @ptr_ind_plus2(
-; FORCE-VEC: load i32, i32*
-; FORCE-VEC: insertelement <2 x i32>
-; FORCE-VEC: load i32, i32*
-; FORCE-VEC: insertelement <2 x i32>
-; FORCE-VEC: load i32, i32*
-; FORCE-VEC: insertelement <2 x i32>
-; FORCE-VEC: load i32, i32*
-; FORCE-VEC: insertelement <2 x i32>
+; FORCE-VEC: %[[V:.*]] = load <4 x i32>
+; FORCE-VEC: shufflevector <4 x i32> %[[V]], <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+; FORCE-VEC: shufflevector <4 x i32> %[[V]], <4 x i32> undef, <2 x i32> <i32 1, i32 3>
; FORCE-VEC: mul nsw <2 x i32>
; FORCE-VEC: add nsw <2 x i32>
; FORCE-VEC: %index.next = add i64 %index, 2
diff --git a/test/Transforms/LoopVectorize/interleaved-accesses.ll b/test/Transforms/LoopVectorize/interleaved-accesses.ll
new file mode 100644
index 0000000000000..d7237a5c27dc8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -0,0 +1,467 @@
+; RUN: opt -S -loop-vectorize -instcombine -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true -runtime-memory-check-threshold=24 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; Check vectorization on an interleaved load group of factor 2 and an interleaved
+; store group of factor 2.
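+; The expected strategy: one wide <8 x i32> load de-interleaved by two
+; shufflevectors, and a single interleaving shufflevector feeding one wide
+; <8 x i32> store.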
+
+; int AB[1024];
+; int CD[1024];
+; void test_array_load2_store2(int C, int D) {
+; for (int i = 0; i < 1024; i+=2) {
+; int A = AB[i];
+; int B = AB[i+1];
+; CD[i] = A + C;
+; CD[i+1] = B * D;
+; }
+; }
+
+; CHECK-LABEL: @test_array_load2_store2(
+; CHECK: %wide.vec = load <8 x i32>, <8 x i32>* %{{.*}}, align 4
+; CHECK: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: add nsw <4 x i32>
+; CHECK: mul nsw <4 x i32>
+; CHECK: %interleaved.vec = shufflevector <4 x i32> {{.*}}, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK: store <8 x i32> %interleaved.vec, <8 x i32>* %{{.*}}, align 4
+
+@AB = common global [1024 x i32] zeroinitializer, align 4
+@CD = common global [1024 x i32] zeroinitializer, align 4
+
+define void @test_array_load2_store2(i32 %C, i32 %D) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx0 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx0, align 4
+ %tmp1 = or i64 %indvars.iv, 1
+ %arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %tmp1
+ %tmp2 = load i32, i32* %arrayidx1, align 4
+ %add = add nsw i32 %tmp, %C
+ %mul = mul nsw i32 %tmp2, %D
+ %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %tmp1
+ store i32 %mul, i32* %arrayidx3, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp slt i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
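+; Check vectorization on an interleaved load group of factor 3 and an
+; interleaved store group of factor 3.
+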
+; int A[3072];
+; struct ST3 S[1024];
+; void test_struct_array_load3_store3() {
+; int *ptr = A;
+; for (int i = 0; i < 1024; i++) {
+; int X1 = *ptr++;
+; int X2 = *ptr++;
+; int X3 = *ptr++;
+; S[i].x = X1 + 1;
+; S[i].y = X2 + 2;
+; S[i].z = X3 + 3;
+; }
+; }
+
+; CHECK-LABEL: @test_struct_array_load3_store3(
+; CHECK: %wide.vec = load <12 x i32>, <12 x i32>* {{.*}}, align 4
+; CHECK: shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+; CHECK: shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+; CHECK: shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+; CHECK: add nsw <4 x i32> {{.*}}, <i32 1, i32 1, i32 1, i32 1>
+; CHECK: add nsw <4 x i32> {{.*}}, <i32 2, i32 2, i32 2, i32 2>
+; CHECK: add nsw <4 x i32> {{.*}}, <i32 3, i32 3, i32 3, i32 3>
+; CHECK: shufflevector <4 x i32> {{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK: %interleaved.vec = shufflevector <8 x i32> {{.*}}, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+; CHECK: store <12 x i32> %interleaved.vec, <12 x i32>* {{.*}}, align 4
+
+%struct.ST3 = type { i32, i32, i32 }
+@A = common global [3072 x i32] zeroinitializer, align 4
+@S = common global [1024 x %struct.ST3] zeroinitializer, align 4
+
+define void @test_struct_array_load3_store3() {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %ptr.016 = phi i32* [ getelementptr inbounds ([3072 x i32], [3072 x i32]* @A, i64 0, i64 0), %entry ], [ %incdec.ptr2, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, i32* %ptr.016, i64 1
+ %tmp = load i32, i32* %ptr.016, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %ptr.016, i64 2
+ %tmp1 = load i32, i32* %incdec.ptr, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %ptr.016, i64 3
+ %tmp2 = load i32, i32* %incdec.ptr1, align 4
+ %add = add nsw i32 %tmp, 1
+ %x = getelementptr inbounds [1024 x %struct.ST3], [1024 x %struct.ST3]* @S, i64 0, i64 %indvars.iv, i32 0
+ store i32 %add, i32* %x, align 4
+ %add3 = add nsw i32 %tmp1, 2
+ %y = getelementptr inbounds [1024 x %struct.ST3], [1024 x %struct.ST3]* @S, i64 0, i64 %indvars.iv, i32 1
+ store i32 %add3, i32* %y, align 4
+ %add6 = add nsw i32 %tmp2, 3
+ %z = getelementptr inbounds [1024 x %struct.ST3], [1024 x %struct.ST3]* @S, i64 0, i64 %indvars.iv, i32 2
+ store i32 %add6, i32* %z, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; Check vectorization on an interleaved load group of factor 4.
+
+; struct ST4{
+; int x;
+; int y;
+; int z;
+; int w;
+; };
+; int test_struct_load4(struct ST4 *S) {
+; int r = 0;
+; for (int i = 0; i < 1024; i++) {
+; r += S[i].x;
+; r -= S[i].y;
+; r += S[i].z;
+; r -= S[i].w;
+; }
+; return r;
+; }
+
+; CHECK-LABEL: @test_struct_load4(
+; CHECK: %wide.vec = load <16 x i32>, <16 x i32>* {{.*}}, align 4
+; CHECK: shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+; CHECK: shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+; CHECK: shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+; CHECK: shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+; CHECK: add nsw <4 x i32>
+; CHECK: sub <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: sub <4 x i32>
+
+%struct.ST4 = type { i32, i32, i32, i32 }
+
+define i32 @test_struct_load4(%struct.ST4* nocapture readonly %S) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %r.022 = phi i32 [ 0, %entry ], [ %sub8, %for.body ]
+ %x = getelementptr inbounds %struct.ST4, %struct.ST4* %S, i64 %indvars.iv, i32 0
+ %tmp = load i32, i32* %x, align 4
+ %add = add nsw i32 %tmp, %r.022
+ %y = getelementptr inbounds %struct.ST4, %struct.ST4* %S, i64 %indvars.iv, i32 1
+ %tmp1 = load i32, i32* %y, align 4
+ %sub = sub i32 %add, %tmp1
+ %z = getelementptr inbounds %struct.ST4, %struct.ST4* %S, i64 %indvars.iv, i32 2
+ %tmp2 = load i32, i32* %z, align 4
+ %add5 = add nsw i32 %sub, %tmp2
+ %w = getelementptr inbounds %struct.ST4, %struct.ST4* %S, i64 %indvars.iv, i32 3
+ %tmp3 = load i32, i32* %w, align 4
+ %sub8 = sub i32 %add5, %tmp3
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 %sub8
+}
+
+; Check vectorization on an interleaved store group of factor 4.
+
+; void test_struct_store4(int *A, struct ST4 *B) {
+; int *ptr = A;
+; for (int i = 0; i < 1024; i++) {
+; int X = *ptr++;
+; B[i].x = X + 1;
+; B[i].y = X * 2;
+; B[i].z = X + 3;
+; B[i].w = X + 4;
+; }
+; }
+
+; CHECK-LABEL: @test_struct_store4(
+; CHECK: %[[LD:.*]] = load <4 x i32>, <4 x i32>*
+; CHECK: add nsw <4 x i32> %[[LD]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK: shl nsw <4 x i32> %[[LD]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK: add nsw <4 x i32> %[[LD]], <i32 3, i32 3, i32 3, i32 3>
+; CHECK: add nsw <4 x i32> %[[LD]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK: shufflevector <4 x i32> {{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK: shufflevector <4 x i32> {{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK: %interleaved.vec = shufflevector <8 x i32> {{.*}}, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+; CHECK: store <16 x i32> %interleaved.vec, <16 x i32>* {{.*}}, align 4
+
+define void @test_struct_store4(i32* noalias nocapture readonly %A, %struct.ST4* noalias nocapture %B) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %ptr.024 = phi i32* [ %A, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, i32* %ptr.024, i64 1
+ %tmp = load i32, i32* %ptr.024, align 4
+ %add = add nsw i32 %tmp, 1
+ %x = getelementptr inbounds %struct.ST4, %struct.ST4* %B, i64 %indvars.iv, i32 0
+ store i32 %add, i32* %x, align 4
+ %mul = shl nsw i32 %tmp, 1
+ %y = getelementptr inbounds %struct.ST4, %struct.ST4* %B, i64 %indvars.iv, i32 1
+ store i32 %mul, i32* %y, align 4
+ %add3 = add nsw i32 %tmp, 3
+ %z = getelementptr inbounds %struct.ST4, %struct.ST4* %B, i64 %indvars.iv, i32 2
+ store i32 %add3, i32* %z, align 4
+ %add6 = add nsw i32 %tmp, 4
+ %w = getelementptr inbounds %struct.ST4, %struct.ST4* %B, i64 %indvars.iv, i32 3
+ store i32 %add6, i32* %w, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; Check vectorization on a reverse interleaved load group of factor 2 and
+; a reverse interleaved store group of factor 2.
+
+; struct ST2 {
+; int x;
+; int y;
+; };
+;
+; void test_reversed_load2_store2(struct ST2 *A, struct ST2 *B) {
+; for (int i = 1023; i >= 0; i--) {
+; int a = A[i].x + i; // interleaved load of index 0
+; int b = A[i].y - i; // interleaved load of index 1
+; B[i].x = a; // interleaved store of index 0
+; B[i].y = b; // interleaved store of index 1
+; }
+; }
+
+; CHECK-LABEL: @test_reversed_load2_store2(
+; CHECK: %wide.vec = load <8 x i32>, <8 x i32>* {{.*}}, align 4
+; CHECK: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK: add nsw <4 x i32>
+; CHECK: sub nsw <4 x i32>
+; CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK: shufflevector <4 x i32> {{.*}}, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK: %interleaved.vec = shufflevector <4 x i32> {{.*}}, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK: store <8 x i32> %interleaved.vec, <8 x i32>* %{{.*}}, align 4
+
+%struct.ST2 = type { i32, i32 }
+
+define void @test_reversed_load2_store2(%struct.ST2* noalias nocapture readonly %A, %struct.ST2* noalias nocapture %B) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 1023, %entry ], [ %indvars.iv.next, %for.body ]
+ %x = getelementptr inbounds %struct.ST2, %struct.ST2* %A, i64 %indvars.iv, i32 0
+ %tmp = load i32, i32* %x, align 4
+ %tmp1 = trunc i64 %indvars.iv to i32
+ %add = add nsw i32 %tmp, %tmp1
+ %y = getelementptr inbounds %struct.ST2, %struct.ST2* %A, i64 %indvars.iv, i32 1
+ %tmp2 = load i32, i32* %y, align 4
+ %sub = sub nsw i32 %tmp2, %tmp1
+ %x5 = getelementptr inbounds %struct.ST2, %struct.ST2* %B, i64 %indvars.iv, i32 0
+ store i32 %add, i32* %x5, align 4
+ %y8 = getelementptr inbounds %struct.ST2, %struct.ST2* %B, i64 %indvars.iv, i32 1
+ store i32 %sub, i32* %y8, align 4
+ %indvars.iv.next = add nsw i64 %indvars.iv, -1
+ %cmp = icmp sgt i64 %indvars.iv, 0
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; Check vectorization on an interleaved load group of factor 2 with 1 gap
+; (missing the load of odd elements).
+
+; void even_load(int *A, int *B) {
+; for (unsigned i = 0; i < 1024; i+=2)
+; B[i/2] = A[i] * 2;
+; }
+
+; CHECK-LABEL: @even_load(
+; CHECK: %wide.vec = load <8 x i32>, <8 x i32>* %{{.*}}, align 4
+; CHECK: %strided.vec = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NOT: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: shl nsw <4 x i32> %strided.vec, <i32 1, i32 1, i32 1, i32 1>
+
+define void @even_load(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %mul = shl nsw i32 %tmp, 1
+ %tmp1 = lshr exact i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %tmp1
+ store i32 %mul, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; Check vectorization on interleaved access groups identified from mixed
+; loads/stores.
+; void mixed_load2_store2(int *A, int *B) {
+; for (unsigned i = 0; i < 1024; i+=2) {
+; B[i] = A[i] * A[i+1];
+; B[i+1] = A[i] + A[i+1];
+; }
+; }
+
+; CHECK-LABEL: @mixed_load2_store2(
+; CHECK: %wide.vec = load <8 x i32>, <8 x i32>* {{.*}}, align 4
+; CHECK: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: %interleaved.vec = shufflevector <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK: store <8 x i32> %interleaved.vec
+
+define void @mixed_load2_store2(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %tmp1 = or i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %tmp1
+ %tmp2 = load i32, i32* %arrayidx2, align 4
+ %mul = mul nsw i32 %tmp2, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+ store i32 %mul, i32* %arrayidx4, align 4
+ %tmp3 = load i32, i32* %arrayidx, align 4
+ %tmp4 = load i32, i32* %arrayidx2, align 4
+ %add10 = add nsw i32 %tmp4, %tmp3
+ %arrayidx13 = getelementptr inbounds i32, i32* %B, i64 %tmp1
+ store i32 %add10, i32* %arrayidx13, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+ %cmp = icmp ult i64 %indvars.iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+; Check vectorization on interleaved access groups identified from mixed
+; loads/stores.
+; void mixed_load3_store3(int *A) {
+; for (unsigned i = 0; i < 1024; i++) {
+; *A++ += i;
+; *A++ += i;
+; *A++ += i;
+; }
+; }
+
+; CHECK-LABEL: @mixed_load3_store3(
+; CHECK: %wide.vec = load <12 x i32>, <12 x i32>* {{.*}}, align 4
+; CHECK: shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+; CHECK: shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+; CHECK: shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+; CHECK: %interleaved.vec = shufflevector <8 x i32> %{{.*}}, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+; CHECK: store <12 x i32> %interleaved.vec, <12 x i32>* %{{.*}}, align 4
+
+define void @mixed_load3_store3(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %i.013 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %A.addr.012 = phi i32* [ %A, %entry ], [ %incdec.ptr3, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, i32* %A.addr.012, i64 1
+ %tmp = load i32, i32* %A.addr.012, align 4
+ %add = add i32 %tmp, %i.013
+ store i32 %add, i32* %A.addr.012, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %A.addr.012, i64 2
+ %tmp1 = load i32, i32* %incdec.ptr, align 4
+ %add2 = add i32 %tmp1, %i.013
+ store i32 %add2, i32* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %A.addr.012, i64 3
+ %tmp2 = load i32, i32* %incdec.ptr1, align 4
+ %add4 = add i32 %tmp2, %i.013
+ store i32 %add4, i32* %incdec.ptr1, align 4
+ %inc = add nuw nsw i32 %i.013, 1
+ %exitcond = icmp eq i32 %inc, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; Check vectorization on an interleaved access group whose members have
+; different types (here, i32 and float).
+
+; struct IntFloat {
+; int a;
+; float b;
+; };
+;
+; int SA;
+; float SB;
+;
+; void int_float_struct(struct IntFloat *A) {
+; int SumA;
+; float SumB;
+; for (unsigned i = 0; i < 1024; i++) {
+; SumA += A[i].a;
+; SumB += A[i].b;
+; }
+; SA = SumA;
+; SB = SumB;
+; }
+
+; CHECK-LABEL: @int_float_struct(
+; CHECK: %wide.vec = load <8 x i32>, <8 x i32>* %{{.*}}, align 4
+; CHECK: %[[V0:.*]] = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK: %[[V1:.*]] = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK: bitcast <4 x i32> %[[V1]] to <4 x float>
+; CHECK: add nsw <4 x i32>
+; CHECK: fadd fast <4 x float>
+
+%struct.IntFloat = type { i32, float }
+
+@SA = common global i32 0, align 4
+@SB = common global float 0.000000e+00, align 4
+
+define void @int_float_struct(%struct.IntFloat* nocapture readonly %A) #0 {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ store i32 %add, i32* @SA, align 4
+ store float %add3, float* @SB, align 4
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %SumB.014 = phi float [ undef, %entry ], [ %add3, %for.body ]
+ %SumA.013 = phi i32 [ undef, %entry ], [ %add, %for.body ]
+ %a = getelementptr inbounds %struct.IntFloat, %struct.IntFloat* %A, i64 %indvars.iv, i32 0
+ %tmp = load i32, i32* %a, align 4
+ %add = add nsw i32 %tmp, %SumA.013
+ %b = getelementptr inbounds %struct.IntFloat, %struct.IntFloat* %A, i64 %indvars.iv, i32 1
+ %tmp1 = load float, float* %b, align 4
+ %add3 = fadd fast float %SumB.014, %tmp1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+attributes #0 = { "unsafe-fp-math"="true" }
diff --git a/test/Transforms/LoopVectorize/zero-sized-pointee-crash.ll b/test/Transforms/LoopVectorize/zero-sized-pointee-crash.ll
new file mode 100644
index 0000000000000..8771dd26948b5
--- /dev/null
+++ b/test/Transforms/LoopVectorize/zero-sized-pointee-crash.ll
@@ -0,0 +1,27 @@
+; RUN: opt -S -loop-vectorize < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
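+; The zero-sized pointee type {} used to crash the loop vectorizer (hence the
+; file name); the CHECK lines just verify that the body survives unchanged.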
+; CHECK-LABEL: @fn1
+define void @fn1() {
+entry-block:
+ br label %middle
+
+middle:
+ %0 = phi {}* [ %3, %middle ], [ inttoptr (i64 0 to {}*), %entry-block ]
+ %1 = bitcast {}* %0 to i8*
+ %2 = getelementptr i8, i8* %1, i64 1
+ %3 = bitcast i8* %2 to {}*
+ %4 = icmp eq i8* %2, undef
+ br i1 %4, label %exit, label %middle
+
+; CHECK: %[[phi:.*]] = phi {}* [ %3, %middle ], [ null, %entry-block ]
+; CHECK-NEXT: %[[bc1:.*]] = bitcast {}* %[[phi]] to i8*
+; CHECK-NEXT: %[[gep:.*]] = getelementptr i8, i8* %[[bc1]], i64 1
+; CHECK-NEXT: %[[bc2:.*]] = bitcast i8* %[[gep]] to {}*
+; CHECK-NEXT: %[[cmp:.*]] = icmp eq i8* %[[gep]], undef
+; CHECK-NEXT: br i1 %[[cmp]],
+
+exit:
+ ret void
+}
diff --git a/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll b/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll
index b2083cb5c700e..99eba5e280944 100644
--- a/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll
+++ b/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll
@@ -63,16 +63,16 @@ lpad:
resume { i8*, i32 } zeroinitializer
}
-define i8 @call_same_range() {
-; CHECK-LABEL: @call_same_range
+define i8 @call_with_same_range() {
+; CHECK-LABEL: @call_with_same_range
; CHECK: tail call i8 @call_with_range
bitcast i8 0 to i8
%out = call i8 @dummy(), !range !0
ret i8 %out
}
-define i8 @invoke_same_range() {
-; CHECK-LABEL: @invoke_same_range()
+define i8 @invoke_with_same_range() {
+; CHECK-LABEL: @invoke_with_same_range()
; CHECK: tail call i8 @invoke_with_range()
%out = invoke i8 @dummy() to label %next unwind label %lpad, !range !0
diff --git a/test/Transforms/MergeFunc/linkonce_odr.ll b/test/Transforms/MergeFunc/linkonce_odr.ll
new file mode 100644
index 0000000000000..1ad0d727f83b8
--- /dev/null
+++ b/test/Transforms/MergeFunc/linkonce_odr.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -mergefunc < %s | FileCheck %s
+
+; Replacements should be totally ordered on the function name.
+; If we don't do this we can end up with one module defining a thunk for @funA
+; and another module defining a thunk for @funB.
+;
+; The problem with this is that the linker could then choose one of these two
+; stubs from each of the two modules, and we would end up with two stubs
+; calling each other.
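+;
+; A sketch of the bad outcome this avoids (hypothetical link step): module 1
+; keeps the body of @funA and turns @funB into a tail call to @funA, module 2
+; does the opposite, and the linker picks one thunk for each symbol, leaving
+; @funA and @funB tail-calling each other forever.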
+
+; CHECK-LABEL: define linkonce_odr i32 @funA
+; CHECK-NEXT: add
+; CHECK: ret
+
+; CHECK-LABEL: define linkonce_odr i32 @funB
+; CHECK-NEXT: tail call i32 @funA(i32 %0, i32 %1)
+; CHECK-NEXT: ret
+
+define linkonce_odr i32 @funB(i32 %x, i32 %y) {
+ %sum = add i32 %x, %y
+ %sum2 = add i32 %x, %sum
+ %sum3 = add i32 %x, %sum2
+ ret i32 %sum3
+}
+
+define linkonce_odr i32 @funA(i32 %x, i32 %y) {
+ %sum = add i32 %x, %y
+ %sum2 = add i32 %x, %sum
+ %sum3 = add i32 %x, %sum2
+ ret i32 %sum3
+}
diff --git a/test/Transforms/NaryReassociate/NVPTX/nary-gep.ll b/test/Transforms/NaryReassociate/NVPTX/nary-gep.ll
index a620c98ec18f0..d08c6f60c041a 100644
--- a/test/Transforms/NaryReassociate/NVPTX/nary-gep.ll
+++ b/test/Transforms/NaryReassociate/NVPTX/nary-gep.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -nary-reassociate -S | FileCheck %s
+; RUN: opt < %s -nary-reassociate -early-cse -S | FileCheck %s
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-unknown-unknown"
@@ -27,24 +27,37 @@ define void @reassociate_gep(float* %a, i64 %i, i64 %j) {
; foo(&a[sext(j)]);
; foo(&a[sext(i +nsw j)]);
+; foo(&a[sext((i +nsw j) +nsw i)]);
; =>
-; t = &a[sext(j)];
-; foo(t);
-; foo(t + sext(i));
+; t1 = &a[sext(j)];
+; foo(t1);
+; t2 = t1 + sext(i);
+; foo(t2);
+; t3 = t2 + sext(i); // sext(i) should be CSE'd by -early-cse.
+; foo(t3);
define void @reassociate_gep_nsw(float* %a, i32 %i, i32 %j) {
; CHECK-LABEL: @reassociate_gep_nsw(
- %1 = add nsw i32 %i, %j
- %idxprom.1 = sext i32 %1 to i64
%idxprom.j = sext i32 %j to i64
- %2 = getelementptr float, float* %a, i64 %idxprom.j
+ %1 = getelementptr float, float* %a, i64 %idxprom.j
; CHECK: [[t1:[^ ]+]] = getelementptr float, float* %a, i64 %idxprom.j
- call void @foo(float* %2)
+ call void @foo(float* %1)
; CHECK: call void @foo(float* [[t1]])
- %3 = getelementptr float, float* %a, i64 %idxprom.1
+
+ %2 = add nsw i32 %i, %j
+ %idxprom.2 = sext i32 %2 to i64
+ %3 = getelementptr float, float* %a, i64 %idxprom.2
; CHECK: [[sexti:[^ ]+]] = sext i32 %i to i64
; CHECK: [[t2:[^ ]+]] = getelementptr float, float* [[t1]], i64 [[sexti]]
call void @foo(float* %3)
; CHECK: call void @foo(float* [[t2]])
+
+ %4 = add nsw i32 %2, %i
+ %idxprom.4 = sext i32 %4 to i64
+ %5 = getelementptr float, float* %a, i64 %idxprom.4
+; CHECK: [[t3:[^ ]+]] = getelementptr float, float* [[t2]], i64 [[sexti]]
+ call void @foo(float* %5)
+; CHECK: call void @foo(float* [[t3]])
+
ret void
}
diff --git a/test/Transforms/Reassociate/basictest.ll b/test/Transforms/Reassociate/basictest.ll
index 015d3b0bee9f2..caaf7726514de 100644
--- a/test/Transforms/Reassociate/basictest.ll
+++ b/test/Transforms/Reassociate/basictest.ll
@@ -202,8 +202,8 @@ define i32 @test14(i32 %X1, i32 %X2) {
ret i32 %D
; CHECK-LABEL: @test14
-; CHECK-NEXT: sub i32 %X1, %X2
-; CHECK-NEXT: mul i32 %B2, 47
+; CHECK-NEXT: %[[SUB:.*]] = sub i32 %X1, %X2
+; CHECK-NEXT: mul i32 %[[SUB]], 47
; CHECK-NEXT: ret i32
}
diff --git a/test/Transforms/Reassociate/canonicalize-neg-const.ll b/test/Transforms/Reassociate/canonicalize-neg-const.ll
index e85a963f6ddac..465460cb53b1b 100644
--- a/test/Transforms/Reassociate/canonicalize-neg-const.ll
+++ b/test/Transforms/Reassociate/canonicalize-neg-const.ll
@@ -49,18 +49,6 @@ define double @test3(double %x, double %y) {
ret double %mul3
}
-; Canonicalize (x - -1234 * y)
-define i64 @test4(i64 %x, i64 %y) {
-; CHECK-LABEL: @test4
-; CHECK-NEXT: mul i64 %y, 1234
-; CHECK-NEXT: add i64 %mul, %x
-; CHECK-NEXT: ret i64 %sub
-
- %mul = mul i64 %y, -1234
- %sub = sub i64 %x, %mul
- ret i64 %sub
-}
-
; Canonicalize (x - -0.1234 * y)
define double @test5(double %x, double %y) {
; CHECK-LABEL: @test5
@@ -156,3 +144,13 @@ define double @test12(double %x, double %y) {
%add = fadd double %div, %x
ret double %add
}
+
+; Don't create an NSW violation
+define i4 @test13(i4 %x) {
+; CHECK-LABEL: @test13
+; CHECK-NEXT: %[[mul:.*]] = mul nsw i4 %x, -2
+; CHECK-NEXT: %[[add:.*]] = add i4 %[[mul]], 3
+ %mul = mul nsw i4 %x, -2
+ %add = add i4 %mul, 3
+ ret i4 %add
+}
diff --git a/test/Transforms/RewriteStatepointsForGC/deref-pointers.ll b/test/Transforms/RewriteStatepointsForGC/deref-pointers.ll
new file mode 100644
index 0000000000000..5913db21fcf38
--- /dev/null
+++ b/test/Transforms/RewriteStatepointsForGC/deref-pointers.ll
@@ -0,0 +1,77 @@
+; RUN: opt -S -rewrite-statepoints-for-gc < %s | FileCheck %s
+
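+; A statepoint may relocate objects, so the pass must strip dereferenceability
+; information (the dereferenceable/dereferenceable_or_null attributes and,
+; presumably for the same reason, the constant flag on the TBAA tag checked at
+; the bottom) rather than let later passes rely on it across the safepoint.
+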
+declare void @foo()
+declare i8 addrspace(1)* @some_function()
+declare void @some_function_consumer(i8 addrspace(1)*)
+declare dereferenceable(4) i8 addrspace(1)* @some_function_ret_deref()
+; CHECK: declare i8 addrspace(1)* @some_function_ret_deref()
+
+define i8 addrspace(1)* @test_deref_arg(i8 addrspace(1)* dereferenceable(4) %a) gc "statepoint-example" {
+; CHECK: define i8 addrspace(1)* @test_deref_arg(i8 addrspace(1)* %a)
+entry:
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %a
+}
+
+define i8 addrspace(1)* @test_deref_or_null_arg(i8 addrspace(1)* dereferenceable_or_null(4) %a) gc "statepoint-example" {
+; CHECK: define i8 addrspace(1)* @test_deref_or_null_arg(i8 addrspace(1)* %a)
+entry:
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %a
+}
+
+define i8 addrspace(1)* @test_deref_retval() gc "statepoint-example" {
+; CHECK-LABEL: @test_deref_retval(
+entry:
+ %a = call dereferenceable(4) i8 addrspace(1)* @some_function()
+; CHECK: %a = call i8 addrspace(1)* @some_function()
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %a
+}
+
+define i8 addrspace(1)* @test_deref_or_null_retval() gc "statepoint-example" {
+; CHECK-LABEL: @test_deref_or_null_retval(
+entry:
+ %a = call dereferenceable_or_null(4) i8 addrspace(1)* @some_function()
+; CHECK: %a = call i8 addrspace(1)* @some_function()
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %a
+}
+
+define i8 @test_md(i8 addrspace(1)* %ptr) gc "statepoint-example" {
+; CHECK-LABEL: @test_md(
+ entry:
+; CHECK: %tmp = load i8, i8 addrspace(1)* %ptr, !tbaa !0
+ %tmp = load i8, i8 addrspace(1)* %ptr, !tbaa !0
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 %tmp
+}
+
+define i8 addrspace(1)* @test_decl_only_attribute(i8 addrspace(1)* %ptr) gc "statepoint-example" {
+; CHECK-LABEL: @test_decl_only_attribute(
+entry:
+; No change here, but the prototype of some_function_ret_deref should have changed.
+; CHECK: call i8 addrspace(1)* @some_function_ret_deref()
+ %a = call i8 addrspace(1)* @some_function_ret_deref()
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %a
+}
+
+define i8 addrspace(1)* @test_callsite_arg_attribute(i8 addrspace(1)* %ptr) gc "statepoint-example" {
+; CHECK-LABEL: @test_callsite_arg_attribute(
+entry:
+; CHECK: call void @some_function_consumer(i8 addrspace(1)* %ptr)
+ call void @some_function_consumer(i8 addrspace(1)* dereferenceable(4) %ptr)
+ call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+ ret i8 addrspace(1)* %ptr
+}
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+
+!0 = !{!1, !1, i64 0, i64 1}
+!1 = !{!"red", !2}
+!2 = !{!"blue"}
+
+; CHECK: !0 = !{!1, !1, i64 0}
+; CHECK: !1 = !{!"red", !2}
+; CHECK: !2 = !{!"blue"}
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/R600/lit.local.cfg b/test/Transforms/SeparateConstOffsetFromGEP/R600/lit.local.cfg
new file mode 100644
index 0000000000000..4086e8d681c38
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/R600/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'R600' not in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/R600/split-gep-and-gvn-addrspace-addressing-modes.ll b/test/Transforms/SeparateConstOffsetFromGEP/R600/split-gep-and-gvn-addrspace-addressing-modes.ll
new file mode 100644
index 0000000000000..527634db0f5b8
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/R600/split-gep-and-gvn-addrspace-addressing-modes.ll
@@ -0,0 +1,94 @@
+; RUN: opt -mtriple=amdgcn-- -S -separate-const-offset-from-gep -reassociate-geps-verify-no-dead-code -gvn < %s | FileCheck -check-prefix=IR %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+@array = internal addrspace(2) constant [4096 x [32 x float]] zeroinitializer, align 4
+
+; IR-LABEL: @sum_of_array(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(2)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(2)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(2)* [[BASE_PTR]], i64 33
+define void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+ %tmp = sext i32 %y to i64
+ %tmp1 = sext i32 %x to i64
+ %tmp2 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp1, i64 %tmp
+ %tmp4 = load float, float addrspace(2)* %tmp2, align 4
+ %tmp5 = fadd float %tmp4, 0.000000e+00
+ %tmp6 = add i32 %y, 1
+ %tmp7 = sext i32 %tmp6 to i64
+ %tmp8 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp1, i64 %tmp7
+ %tmp10 = load float, float addrspace(2)* %tmp8, align 4
+ %tmp11 = fadd float %tmp5, %tmp10
+ %tmp12 = add i32 %x, 1
+ %tmp13 = sext i32 %tmp12 to i64
+ %tmp14 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp13, i64 %tmp
+ %tmp16 = load float, float addrspace(2)* %tmp14, align 4
+ %tmp17 = fadd float %tmp11, %tmp16
+ %tmp18 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp13, i64 %tmp7
+ %tmp20 = load float, float addrspace(2)* %tmp18, align 4
+ %tmp21 = fadd float %tmp17, %tmp20
+ store float %tmp21, float addrspace(1)* %output, align 4
+ ret void
+}
+
+@array2 = internal addrspace(2) constant [4096 x [4 x float]] zeroinitializer, align 4
+
+; Some of the indices go over the maximum mubuf offset, so don't split them.
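+; (The mubuf immediate offset field is 12 bits, i.e. at most 4095 bytes.)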
+
+; IR-LABEL: @sum_of_array_over_max_mubuf_offset(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(2)* [[BASE_PTR]], i64 255
+; IR: add i32 %x, 256
+; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+define void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+ %tmp = sext i32 %y to i64
+ %tmp1 = sext i32 %x to i64
+ %tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp1, i64 %tmp
+ %tmp4 = load float, float addrspace(2)* %tmp2, align 4
+ %tmp5 = fadd float %tmp4, 0.000000e+00
+ %tmp6 = add i32 %y, 255
+ %tmp7 = sext i32 %tmp6 to i64
+ %tmp8 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp1, i64 %tmp7
+ %tmp10 = load float, float addrspace(2)* %tmp8, align 4
+ %tmp11 = fadd float %tmp5, %tmp10
+ %tmp12 = add i32 %x, 256
+ %tmp13 = sext i32 %tmp12 to i64
+ %tmp14 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp13, i64 %tmp
+ %tmp16 = load float, float addrspace(2)* %tmp14, align 4
+ %tmp17 = fadd float %tmp11, %tmp16
+ %tmp18 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp13, i64 %tmp7
+ %tmp20 = load float, float addrspace(2)* %tmp18, align 4
+ %tmp21 = fadd float %tmp17, %tmp20
+ store float %tmp21, float addrspace(1)* %output, align 4
+ ret void
+}
+
+
+@lds_array = internal addrspace(3) global [4096 x [4 x float]] undef, align 4
+
+; DS instructions have a larger immediate offset, so make sure these are OK.
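+; (DS instructions take a 16-bit byte offset, so all of these offsets fit.)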
+; IR-LABEL: @sum_of_lds_array_over_max_mubuf_offset(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %{{[a-zA-Z0-9]+}}, i32 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i32 255
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i32 16128
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i32 16383
+define void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+ %tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %y
+ %tmp4 = load float, float addrspace(3)* %tmp2, align 4
+ %tmp5 = fadd float %tmp4, 0.000000e+00
+ %tmp6 = add i32 %y, 255
+ %tmp8 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %tmp6
+ %tmp10 = load float, float addrspace(3)* %tmp8, align 4
+ %tmp11 = fadd float %tmp5, %tmp10
+ %tmp12 = add i32 %x, 4032
+ %tmp14 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %tmp12, i32 %y
+ %tmp16 = load float, float addrspace(3)* %tmp14, align 4
+ %tmp17 = fadd float %tmp11, %tmp16
+ %tmp18 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %tmp12, i32 %tmp6
+ %tmp20 = load float, float addrspace(3)* %tmp18, align 4
+ %tmp21 = fadd float %tmp17, %tmp20
+ store float %tmp21, float addrspace(1)* %output, align 4
+ ret void
+}
diff --git a/test/Transforms/Sink/convergent.ll b/test/Transforms/Sink/convergent.ll
new file mode 100644
index 0000000000000..49207dbc99276
--- /dev/null
+++ b/test/Transforms/Sink/convergent.ll
@@ -0,0 +1,24 @@
+; RUN: opt -sink -S < %s | FileCheck %s
+
+; Verify that IR sinking does not move convergent operations to
+; blocks that are not control equivalent.
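+; Convergent calls must not gain new control dependencies; sinking the call
+; into %then would make its execution depend on %arg.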
+
+; CHECK: define i32 @foo
+; CHECK: entry
+; CHECK-NEXT: call i32 @bar
+; CHECK-NEXT: br i1 %arg
+
+define i32 @foo(i1 %arg) {
+entry:
+ %c = call i32 @bar() readonly convergent
+ br i1 %arg, label %then, label %end
+
+then:
+ ret i32 %c
+
+end:
+ ret i32 0
+}
+
+declare i32 @bar() readonly convergent
+
diff --git a/test/tools/dsymutil/Inputs/frame-dw2.ll b/test/tools/dsymutil/Inputs/frame-dw2.ll
new file mode 100644
index 0000000000000..7ffc933973158
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/frame-dw2.ll
@@ -0,0 +1,71 @@
+; Generated from frame.c on Darwin with '-arch i386 -g -emit-llvm'
+; ModuleID = 'frame.c'
+target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.11.0"
+
+; Function Attrs: nounwind ssp
+define i32 @bar(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ %var = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !13, metadata !14), !dbg !15
+ call void @llvm.dbg.declare(metadata i32* %var, metadata !16, metadata !14), !dbg !17
+ %0 = load i32, i32* %b.addr, align 4, !dbg !18
+ %add = add nsw i32 %0, 1, !dbg !19
+ store i32 %add, i32* %var, align 4, !dbg !17
+ %call = call i32 @foo(i32* %var), !dbg !20
+ ret i32 %call, !dbg !21
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+declare i32 @foo(i32*) #2
+
+; Function Attrs: nounwind ssp
+define i32 @baz(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !22, metadata !14), !dbg !23
+ %0 = load i32, i32* %b.addr, align 4, !dbg !24
+ %call = call i32 @bar(i32 %0), !dbg !25
+ ret i32 %call, !dbg !26
+}
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="yonah" "target-features"="+cx16,+sse,+sse2,+sse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="yonah" "target-features"="+cx16,+sse,+sse2,+sse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10, !11}
+!llvm.ident = !{!12}
+
+!0 = !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.7.0 (trunk 239176) (llvm/trunk 239190)", isOptimized: false, runtimeVersion: 0, emissionKind: 1, enums: !2, retainedTypes: !2, subprograms: !3, globals: !2, imports: !2)
+!1 = !DIFile(filename: "frame.c", directory: "/tmp")
+!2 = !{}
+!3 = !{!4, !8}
+!4 = !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: false, function: i32 (i32)* @bar, variables: !2)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7, !7}
+!7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!8 = !DISubprogram(name: "baz", scope: !1, file: !1, line: 8, type: !5, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, function: i32 (i32)* @baz, variables: !2)
+!9 = !{i32 2, !"Dwarf Version", i32 2}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
+!11 = !{i32 1, !"PIC Level", i32 2}
+!12 = !{!"clang version 3.7.0 (trunk 239176) (llvm/trunk 239190)"}
+!13 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "b", arg: 1, scope: !4, file: !1, line: 3, type: !7)
+!14 = !DIExpression()
+!15 = !DILocation(line: 3, column: 13, scope: !4)
+!16 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "var", scope: !4, file: !1, line: 4, type: !7)
+!17 = !DILocation(line: 4, column: 6, scope: !4)
+!18 = !DILocation(line: 4, column: 12, scope: !4)
+!19 = !DILocation(line: 4, column: 14, scope: !4)
+!20 = !DILocation(line: 5, column: 9, scope: !4)
+!21 = !DILocation(line: 5, column: 2, scope: !4)
+!22 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "b", arg: 1, scope: !8, file: !1, line: 8, type: !7)
+!23 = !DILocation(line: 8, column: 13, scope: !8)
+!24 = !DILocation(line: 9, column: 13, scope: !8)
+!25 = !DILocation(line: 9, column: 9, scope: !8)
+!26 = !DILocation(line: 9, column: 2, scope: !8)
diff --git a/test/tools/dsymutil/Inputs/frame-dw4.ll b/test/tools/dsymutil/Inputs/frame-dw4.ll
new file mode 100644
index 0000000000000..c8674b13e585f
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/frame-dw4.ll
@@ -0,0 +1,71 @@
+; Generated from frame.c on Darwin with '-arch i386 -gdwarf-4 -emit-llvm'
+; ModuleID = 'frame.c'
+target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.11.0"
+
+; Function Attrs: nounwind ssp
+define i32 @bar(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ %var = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !13, metadata !14), !dbg !15
+ call void @llvm.dbg.declare(metadata i32* %var, metadata !16, metadata !14), !dbg !17
+ %0 = load i32, i32* %b.addr, align 4, !dbg !18
+ %add = add nsw i32 %0, 1, !dbg !19
+ store i32 %add, i32* %var, align 4, !dbg !17
+ %call = call i32 @foo(i32* %var), !dbg !20
+ ret i32 %call, !dbg !21
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+declare i32 @foo(i32*) #2
+
+; Function Attrs: nounwind ssp
+define i32 @baz(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !22, metadata !14), !dbg !23
+ %0 = load i32, i32* %b.addr, align 4, !dbg !24
+ %call = call i32 @bar(i32 %0), !dbg !25
+ ret i32 %call, !dbg !26
+}
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="yonah" "target-features"="+cx16,+sse,+sse2,+sse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="yonah" "target-features"="+cx16,+sse,+sse2,+sse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10, !11}
+!llvm.ident = !{!12}
+
+!0 = !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.7.0 (trunk 239176) (llvm/trunk 239190)", isOptimized: false, runtimeVersion: 0, emissionKind: 1, enums: !2, retainedTypes: !2, subprograms: !3, globals: !2, imports: !2)
+!1 = !DIFile(filename: "frame.c", directory: "/tmp")
+!2 = !{}
+!3 = !{!4, !8}
+!4 = !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: false, function: i32 (i32)* @bar, variables: !2)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7, !7}
+!7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!8 = !DISubprogram(name: "baz", scope: !1, file: !1, line: 8, type: !5, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, function: i32 (i32)* @baz, variables: !2)
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
+!11 = !{i32 1, !"PIC Level", i32 2}
+!12 = !{!"clang version 3.7.0 (trunk 239176) (llvm/trunk 239190)"}
+!13 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "b", arg: 1, scope: !4, file: !1, line: 3, type: !7)
+!14 = !DIExpression()
+!15 = !DILocation(line: 3, column: 13, scope: !4)
+!16 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "var", scope: !4, file: !1, line: 4, type: !7)
+!17 = !DILocation(line: 4, column: 6, scope: !4)
+!18 = !DILocation(line: 4, column: 12, scope: !4)
+!19 = !DILocation(line: 4, column: 14, scope: !4)
+!20 = !DILocation(line: 5, column: 9, scope: !4)
+!21 = !DILocation(line: 5, column: 2, scope: !4)
+!22 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "b", arg: 1, scope: !8, file: !1, line: 8, type: !7)
+!23 = !DILocation(line: 8, column: 13, scope: !8)
+!24 = !DILocation(line: 9, column: 13, scope: !8)
+!25 = !DILocation(line: 9, column: 9, scope: !8)
+!26 = !DILocation(line: 9, column: 2, scope: !8)
diff --git a/test/tools/dsymutil/Inputs/frame.c b/test/tools/dsymutil/Inputs/frame.c
new file mode 100644
index 0000000000000..9ca082d4ae5d7
--- /dev/null
+++ b/test/tools/dsymutil/Inputs/frame.c
@@ -0,0 +1,10 @@
+int foo(int *f);
+
+int bar(int b) {
+ int var = b + 1;
+ return foo(&var);
+}
+
+int baz(int b) {
+ return bar(b);
+}
diff --git a/test/tools/dsymutil/X86/basic-linking-x86.test b/test/tools/dsymutil/X86/basic-linking-x86.test
index 1059e233f94b9..19b4e3bef6637 100644
--- a/test/tools/dsymutil/X86/basic-linking-x86.test
+++ b/test/tools/dsymutil/X86/basic-linking-x86.test
@@ -6,6 +6,8 @@ RUN: llvm-dsymutil -o %t2 -oso-prepend-path=%p/.. %p/../Inputs/basic.macho.x86_6
RUN: llvm-dwarfdump %t2 | FileCheck %s
RUN: llvm-dsymutil -o - -oso-prepend-path=%p/.. %p/../Inputs/basic.macho.x86_64 | llvm-dwarfdump - | FileCheck %s --check-prefix=CHECK --check-prefix=BASIC
RUN: llvm-dsymutil -o - -oso-prepend-path=%p/.. %p/../Inputs/basic-archive.macho.x86_64 | llvm-dwarfdump - | FileCheck %s --check-prefix=CHECK --check-prefix=ARCHIVE
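+# The next two RUN lines round-trip the debug map through its YAML
+# representation (-dump-debug-map piped back in with -y) and expect the
+# final DWARF to be unchanged: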
+RUN: llvm-dsymutil -dump-debug-map -oso-prepend-path=%p/.. %p/../Inputs/basic.macho.x86_64 | llvm-dsymutil -y -o - - | llvm-dwarfdump - | FileCheck %s --check-prefix=CHECK --check-prefix=BASIC
+RUN: llvm-dsymutil -dump-debug-map -oso-prepend-path=%p/.. %p/../Inputs/basic-archive.macho.x86_64 | llvm-dsymutil -o - -y - | llvm-dwarfdump - | FileCheck %s --check-prefix=CHECK --check-prefix=ARCHIVE
CHECK: file format Mach-O 64-bit x86-64
@@ -118,7 +120,7 @@ ARCHIVE: DW_AT_location [DW_FORM_block1] (<0x09> 03 08 10 00 00 01 00 00 00 )
CHECK: DW_TAG_volatile_type [10]
CHECK: DW_AT_type [DW_FORM_ref4] (cu + 0x0041 => {0x00000167})
CHECK: DW_TAG_base_type [4]
-CHACK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000060] = "int")
+CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000060] = "int")
CHECK: DW_TAG_subprogram [2] *
CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x0000009b] = "bar")
CHECK: DW_AT_type [DW_FORM_ref4] (cu + 0x0041 => {0x00000167})
diff --git a/test/tools/dsymutil/X86/basic-lto-linking-x86.test b/test/tools/dsymutil/X86/basic-lto-linking-x86.test
index 22b6e0808d8a5..395234e96166a 100644
--- a/test/tools/dsymutil/X86/basic-lto-linking-x86.test
+++ b/test/tools/dsymutil/X86/basic-lto-linking-x86.test
@@ -1,5 +1,6 @@
REQUIRES: shell
RUN: llvm-dsymutil -o - -oso-prepend-path=%p/.. %p/../Inputs/basic-lto.macho.x86_64 | llvm-dwarfdump - | FileCheck %s
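+# Also round-trip the debug map through YAML (-dump-debug-map | -y) and
+# check that the linked DWARF is unchanged: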
+RUN: llvm-dsymutil -oso-prepend-path=%p/.. -dump-debug-map %p/../Inputs/basic-lto.macho.x86_64 | llvm-dsymutil -o - -y - | llvm-dwarfdump - | FileCheck %s
CHECK: file format Mach-O 64-bit x86-64
diff --git a/test/tools/dsymutil/X86/frame-1.test b/test/tools/dsymutil/X86/frame-1.test
new file mode 100644
index 0000000000000..7852e68a142ab
--- /dev/null
+++ b/test/tools/dsymutil/X86/frame-1.test
@@ -0,0 +1,32 @@
+# REQUIRES: object-emission
+# RUN: rm -rf %t
+# RUN: mkdir -p %t
+# RUN: llc -filetype=obj %p/../Inputs/frame-dw2.ll -o %t/frame-dw2.o
+# RUN: llvm-dsymutil -oso-prepend-path=%t -y %s -o - | llvm-dwarfdump -debug-dump=frames - | FileCheck %s
+
+# This test is meant to verify that identical CIEs will get reused
+# in the same file but also between files. For this to happen, we
+# link the same file twice using this made-up debug map:
+
+---
+triple: 'i386-unknown-unknown-macho'
+objects:
+ - filename: frame-dw2.o
+ symbols:
+ - { sym: _bar, objAddr: 0x0, binAddr: 0x1000, size: 0x12 }
+ - { sym: _baz, objAddr: 0x0, binAddr: 0x2000, size: 0x12 }
+ - filename: frame-dw2.o
+ symbols:
+ - { sym: _baz, objAddr: 0x0, binAddr: 0x3000, size: 0x12 }
+...
+
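+# With a single object file mapped at three different addresses, all of
+# the FDEs below are expected to share the one CIE at offset 0: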
+# CHECK: .debug_frame contents:
+# CHECK: 00000000 {{[0-9a-f]*}} ffffffff CIE
+# CHECK-NOT: FDE
+# CHECK: FDE cie=00000000 pc=00001000...00001
+# CHECK-NOT: FDE
+# CHECK: FDE cie=00000000 pc=00002000...00002
+# CHECK-NOT: FDE
+# CHECK: FDE cie=00000000 pc=00003000...00003
+# CHECK-NOT: FDE
+
diff --git a/test/tools/dsymutil/X86/frame-2.test b/test/tools/dsymutil/X86/frame-2.test
new file mode 100644
index 0000000000000..168e342a4f743
--- /dev/null
+++ b/test/tools/dsymutil/X86/frame-2.test
@@ -0,0 +1,47 @@
+# REQUIRES: object-emission
+# RUN: rm -rf %t
+# RUN: mkdir -p %t
+# RUN: llc -filetype=obj %p/../Inputs/frame-dw2.ll -o %t/frame-dw2.o
+# RUN: llc -filetype=obj %p/../Inputs/frame-dw4.ll -o %t/frame-dw4.o
+# RUN: llvm-dsymutil -oso-prepend-path=%t -y %s -o - | llvm-dwarfdump -debug-dump=frames - | FileCheck %s
+
+# Check the handling of multiple different CIEs. To have CIEs that
+# appear to be different, use a DWARF 2 version of the file along with
+# a DWARF 4 version. The CIE header version (and layout) will be different.
+# FIXME: this test also checks that we didn't reuse the first CIE when it
+# appears again. This is behavior we inherited from dsymutil-classic,
+# but it should be fixed (see comment in patchFrameInfoForObject()).
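+# (For reference: a DWARF 2 .debug_frame CIE carries version 1 in its
+# header, while a DWARF 4 CIE carries version 4 and extra fields such as
+# address_size, so the two CIEs can never be byte-identical.)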
+---
+triple: 'i386-unknown-unknown-macho'
+objects:
+ - filename: frame-dw2.o
+ symbols:
+ - { sym: _bar, objAddr: 0x0, binAddr: 0x1000, size: 0x12 }
+ - { sym: _baz, objAddr: 0x0, binAddr: 0x2000, size: 0x12 }
+ - filename: frame-dw4.o
+ symbols:
+ - { sym: _baz, objAddr: 0x0, binAddr: 0x3000, size: 0x12 }
+ - filename: frame-dw2.o
+ symbols:
+ - { sym: _bar, objAddr: 0x0, binAddr: 0x4000, size: 0x12 }
+...
+
+# CHECK: .debug_frame contents:
+# CHECK: 00000000 {{[0-9a-f]*}} ffffffff CIE
+# CHECK-NEXT: Version:{{.*}}1
+# CHECK-NOT: FDE
+# CHECK: FDE cie=00000000 pc=00001000...00001
+# CHECK-NOT: FDE
+# CHECK: FDE cie=00000000 pc=00002000...00002
+# CHECK-NOT: FDE
+# CHECK: [[CIEDW4:[0-9a-f]*]] 00000010 ffffffff CIE
+# CHECK-NEXT: Version:{{.*}}4
+# CHECK-NOT: FDE
+# CHECK: FDE cie=[[CIEDW4]] pc=00003000...00003
+# CHECK-NOT: FDE
+# CHECK: [[CIEDW2:[0-9a-f]*]] {{[0-9a-f]*}} ffffffff CIE
+# CHECK-NEXT: Version:{{.*}}1
+# CHECK-NOT: FDE
+# CHECK: FDE cie=[[CIEDW2]] pc=00004000...00004
+# CHECK-NOT: FDE
+
diff --git a/test/tools/dsymutil/debug-map-parsing.test b/test/tools/dsymutil/debug-map-parsing.test
index 3422316191feb..5091dfbfc24d6 100644
--- a/test/tools/dsymutil/debug-map-parsing.test
+++ b/test/tools/dsymutil/debug-map-parsing.test
@@ -1,40 +1,42 @@
-RUN: llvm-dsymutil -v -parse-only -oso-prepend-path=%p %p/Inputs/basic.macho.x86_64 | FileCheck %s
-RUN: llvm-dsymutil -v -parse-only -oso-prepend-path=%p %p/Inputs/basic-lto.macho.x86_64 | FileCheck %s --check-prefix=CHECK-LTO
-RUN: llvm-dsymutil -v -parse-only -oso-prepend-path=%p %p/Inputs/basic-archive.macho.x86_64 | FileCheck %s --check-prefix=CHECK-ARCHIVE
-RUN: llvm-dsymutil -v -parse-only %p/Inputs/basic.macho.x86_64 2>&1 | FileCheck %s --check-prefix=NOT-FOUND
-RUN: not llvm-dsymutil -v -parse-only %p/Inputs/inexistant 2>&1 | FileCheck %s --check-prefix=NO-EXECUTABLE
+RUN: llvm-dsymutil -v -dump-debug-map -oso-prepend-path=%p %p/Inputs/basic.macho.x86_64 | FileCheck %s
+RUN: llvm-dsymutil -v -dump-debug-map -oso-prepend-path=%p %p/Inputs/basic-lto.macho.x86_64 | FileCheck %s --check-prefix=CHECK-LTO
+RUN: llvm-dsymutil -v -dump-debug-map -oso-prepend-path=%p %p/Inputs/basic-archive.macho.x86_64 | FileCheck %s --check-prefix=CHECK-ARCHIVE
+RUN: llvm-dsymutil -v -dump-debug-map %p/Inputs/basic.macho.x86_64 2>&1 | FileCheck %s --check-prefix=NOT-FOUND
+RUN: not llvm-dsymutil -v -dump-debug-map %p/Inputs/inexistant 2>&1 | FileCheck %s --check-prefix=NO-EXECUTABLE
Check that we can parse the debug map of the basic executable.
CHECK-NOT: error
-CHECK: DEBUG MAP: x86_64-unknown-unknown-macho
-CHECK: /Inputs/basic1.macho.x86_64.o:
-CHECK: 0000000000000000 => 0000000100000ea0+0x24 _main
-CHECK: /Inputs/basic2.macho.x86_64.o:
-CHECK: 0000000000000310 => 0000000100001000+0x0 _baz
-CHECK: 0000000000000020 => 0000000100000ed0+0x50 _foo
-CHECK: 0000000000000070 => 0000000100000f20+0x17 _inc
-CHECK: 0000000000000560 => 0000000100001008+0x0 _private_int
-CHECK: /Inputs/basic3.macho.x86_64.o:
-CHECK: 0000000000000020 => 0000000100000f40+0x50 _bar
-CHECK: 0000000000000070 => 0000000100000f90+0x19 _inc
-CHECK: 0000000000000004 => 0000000100001004+0x0 _val
-CHECK: END DEBUG MAP
+CHECK: ---
+CHECK: triple: 'x86_64-unknown-unknown-macho'
+CHECK: filename:{{.*}}/Inputs/basic1.macho.x86_64.o
+CHECK-DAG: sym: _main, objAddr: 0x0000000000000000, binAddr: 0x0000000100000EA0, size: 0x00000024
+CHECK: filename{{.*}}/Inputs/basic2.macho.x86_64.o
+CHECK-DAG: sym: _baz, objAddr: 0x0000000000000310, binAddr: 0x0000000100001000, size: 0x00000000
+CHECK-DAG: sym: _foo, objAddr: 0x0000000000000020, binAddr: 0x0000000100000ED0, size: 0x00000050
+CHECK-DAG: sym: _inc, objAddr: 0x0000000000000070, binAddr: 0x0000000100000F20, size: 0x00000017
+CHECK-DAG: sym: _private_int, objAddr: 0x0000000000000560, binAddr: 0x0000000100001008, size: 0x00000000
+CHECK: filename{{.*}}/Inputs/basic3.macho.x86_64.o
+CHECK-DAG: sym: _bar, objAddr: 0x0000000000000020, binAddr: 0x0000000100000F40, size: 0x00000050
+CHECK-DAG: sym: _inc, objAddr: 0x0000000000000070, binAddr: 0x0000000100000F90, size: 0x00000019
+CHECK-DAG: sym: _val, objAddr: 0x0000000000000004, binAddr: 0x0000000100001004, size: 0x00000000
+CHECK: ...
Check that we can parse the debug map of the basic-lto executable.
CHECK-LTO-NOT: error
-CHECK-LTO: DEBUG MAP: x86_64-unknown-unknown-macho
-CHECK-LTO: /Inputs/basic-lto.macho.x86_64.o:
-CHECK-LTO: 0000000000000050 => 0000000100000f90+0x24 _bar
-CHECK-LTO: 0000000000000658 => 0000000100001000+0x0 _baz
-CHECK-LTO: 0000000000000010 => 0000000100000f50+0x40 _foo
-CHECK-LTO: 0000000000000000 => 0000000100000f40+0x10 _main
-CHECK-LTO: 00000000000008e8 => 0000000100001008+0x0 _private_int
-CHECK-LTO: 00000000000008ec => 0000000100001004+0x0 _val
-CHECK-LTO: END DEBUG MAP
+CHECK-LTO: ---
+CHECK-LTO: triple: 'x86_64-unknown-unknown-macho'
+CHECK-LTO: /Inputs/basic-lto.macho.x86_64.o
+CHECK-LTO-DAG: sym: _bar, objAddr: 0x0000000000000050, binAddr: 0x0000000100000F90, size: 0x00000024
+CHECK-LTO-DAG: sym: _baz, objAddr: 0x0000000000000658, binAddr: 0x0000000100001000, size: 0x00000000
+CHECK-LTO-DAG: sym: _foo, objAddr: 0x0000000000000010, binAddr: 0x0000000100000F50, size: 0x00000040
+CHECK-LTO-DAG: sym: _main, objAddr: 0x0000000000000000, binAddr: 0x0000000100000F40, size: 0x00000010
+CHECK-LTO-DAG: sym: _private_int, objAddr: 0x00000000000008E8, binAddr: 0x0000000100001008, size: 0x00000000
+CHECK-LTO-DAG: sym: _val, objAddr: 0x00000000000008EC, binAddr: 0x0000000100001004, size: 0x00000000
+CHECK-LTO: ...
Check that we correctly handle debug maps with archive members (including only
opening the archive once if multiple of its members are used).
@@ -48,20 +50,20 @@ CHECK-ARCHIVE-NEXT: opened new archive {{.*}}/libbasic.a'
CHECK-ARCHIVE-NEXT: found member in current archive.
CHECK-ARCHIVE-NEXT: trying to open {{.*}}/libbasic.a(basic3.macho.x86_64.o)'
CHECK-ARCHIVE-NEXT: found member in current archive.
-CHECK-ARCHIVE: DEBUG MAP: x86_64-unknown-unknown-macho
-CHECK-ARCHIVE: object addr => executable addr symbol name
-CHECK-ARCHIVE: /Inputs/basic1.macho.x86_64.o:
-CHECK-ARCHIVE: 0000000000000000 => 0000000100000ea0+0x24 _main
-CHECK-ARCHIVE: /Inputs/./libbasic.a(basic2.macho.x86_64.o):
-CHECK-ARCHIVE: 0000000000000310 => 0000000100001000+0x0 _baz
-CHECK-ARCHIVE: 0000000000000020 => 0000000100000ed0+0x50 _foo
-CHECK-ARCHIVE: 0000000000000070 => 0000000100000f20+0x17 _inc
-CHECK-ARCHIVE: 0000000000000560 => 0000000100001004+0x0 _private_int
-CHECK-ARCHIVE: /Inputs/./libbasic.a(basic3.macho.x86_64.o):
-CHECK-ARCHIVE: 0000000000000020 => 0000000100000f40+0x50 _bar
-CHECK-ARCHIVE: 0000000000000070 => 0000000100000f90+0x19 _inc
-CHECK-ARCHIVE: 0000000000000004 => 0000000100001008+0x0 _val
-CHECK-ARCHIVE: END DEBUG MAP
+CHECK-ARCHIVE: ---
+CHECK-ARCHIVE: triple: 'x86_64-unknown-unknown-macho'
+CHECK-ARCHIVE: /Inputs/basic1.macho.x86_64.o
+CHECK-ARCHIVE-DAG: sym: _main, objAddr: 0x0000000000000000, binAddr: 0x0000000100000EA0, size: 0x00000024
+CHECK-ARCHIVE: /Inputs/./libbasic.a(basic2.macho.x86_64.o)
+CHECK-ARCHIVE-DAG: sym: _baz, objAddr: 0x0000000000000310, binAddr: 0x0000000100001000, size: 0x00000000
+CHECK-ARCHIVE-DAG: sym: _foo, objAddr: 0x0000000000000020, binAddr: 0x0000000100000ED0, size: 0x00000050
+CHECK-ARCHIVE-DAG: sym: _inc, objAddr: 0x0000000000000070, binAddr: 0x0000000100000F20, size: 0x00000017
+CHECK-ARCHIVE-DAG: sym: _private_int, objAddr: 0x0000000000000560, binAddr: 0x0000000100001004, size: 0x00000000
+CHECK-ARCHIVE: /Inputs/./libbasic.a(basic3.macho.x86_64.o)
+CHECK-ARCHIVE-DAG: sym: _bar, objAddr: 0x0000000000000020, binAddr: 0x0000000100000F40, size: 0x00000050
+CHECK-ARCHIVE-DAG: sym: _inc, objAddr: 0x0000000000000070, binAddr: 0x0000000100000F90, size: 0x00000019
+CHECK-ARCHIVE-DAG: sym: _val, objAddr: 0x0000000000000004, binAddr: 0x0000000100001008, size: 0x00000000
+CHECK-ARCHIVE: ...
Check that we warn about missing object files (this presumes that the files aren't
present in the machine's /Inputs/ folder, which should be a pretty safe bet).
@@ -69,11 +71,11 @@ present in the machine's /Inputs/ folder, which should be a pretty safe bet).
NOT-FOUND: cannot open{{.*}}"/Inputs/basic1.macho.x86_64.o": {{[Nn]o}} such file
NOT-FOUND: cannot open{{.*}}"/Inputs/basic2.macho.x86_64.o": {{[Nn]o}} such file
NOT-FOUND: cannot open{{.*}}"/Inputs/basic3.macho.x86_64.o": {{[Nn]o}} such file
-NOT-FOUND: DEBUG MAP:
-NOT-FOUND-NEXT: object addr => executable addr symbol name
-NOT-FOUND-NEXT: END DEBUG MAP
+NOT-FOUND: ---
+NOT-FOUND-NEXT: triple: 'x86_64-unknown-unknown-macho'
+NOT-FOUND-NEXT: ...
Check that we correctly error out on an invalid executable.
NO-EXECUTABLE: cannot parse{{.*}}/inexistant": {{[Nn]o}} such file
-NO-EXECUTABLE-NOT: DEBUG MAP
+NO-EXECUTABLE-NOT: ---
diff --git a/test/tools/dsymutil/yaml-object-address-rewrite.test b/test/tools/dsymutil/yaml-object-address-rewrite.test
new file mode 100644
index 0000000000000..dcb39be891cd7
--- /dev/null
+++ b/test/tools/dsymutil/yaml-object-address-rewrite.test
@@ -0,0 +1,44 @@
+# RUN: llvm-dsymutil -v -dump-debug-map -oso-prepend-path=%p -y %s | FileCheck %s
+#
+# The YAML debug map below is the one from basic-archive.macho.x86_64 with
+# the object addresses set to zero. Check that the YAML import is able to
+# rewrite these addresses to the right values.
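+# (To recover the addresses, dsymutil presumably looks each symbol up in
+# the symbol table of the object file named by the debug map, which is
+# why -oso-prepend-path must point at the real input objects.)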
+#
+# CHECK: ---
+# CHECK-NEXT: triple:{{.*}}'x86_64-unknown-unknown-macho'
+# CHECK-NEXT: objects:
+# CHECK-NEXT: filename:{{.*}}/Inputs/basic1.macho.x86_64.o
+# CHECK-NEXT: symbols:
+# CHECK-NEXT: sym: _main, objAddr: 0x0000000000000000, binAddr: 0x0000000100000EA0, size: 0x00000024
+# CHECK-NEXT: filename:{{.*}}/Inputs/./libbasic.a(basic2.macho.x86_64.o)'
+# CHECK-NEXT: symbols:
+# CHECK-DAG: sym: _foo, objAddr: 0x0000000000000020, binAddr: 0x0000000100000ED0, size: 0x00000050
+# CHECK-DAG: sym: _private_int, objAddr: 0x0000000000000560, binAddr: 0x0000000100001004, size: 0x00000000
+# CHECK-DAG: sym: _inc, objAddr: 0x0000000000000070, binAddr: 0x0000000100000F20, size: 0x00000017
+# CHECK-DAG: sym: _baz, objAddr: 0x0000000000000310, binAddr: 0x0000000100001000, size: 0x00000000
+# CHECK-NOT: { sym:
+# CHECK-NEXT: filename:{{.*}}/Inputs/./libbasic.a(basic3.macho.x86_64.o)'
+# CHECK-NEXT: symbols:
+# CHECK-DAG: sym: _val, objAddr: 0x0000000000000004, binAddr: 0x0000000100001008, size: 0x00000000
+# CHECK-DAG: sym: _bar, objAddr: 0x0000000000000020, binAddr: 0x0000000100000F40, size: 0x00000050
+# CHECK-DAG: sym: _inc, objAddr: 0x0000000000000070, binAddr: 0x0000000100000F90, size: 0x00000019
+# CHECK-NOT: { sym:
+# CHECK-NEXT: ...
+---
+triple: 'x86_64-unknown-unknown-macho'
+objects:
+ - filename: /Inputs/basic1.macho.x86_64.o
+ symbols:
+ - { sym: _main, objAddr: 0x0, binAddr: 0x0000000100000EA0, size: 0x00000024 }
+ - filename: /Inputs/./libbasic.a(basic2.macho.x86_64.o)
+ symbols:
+ - { sym: _foo, objAddr: 0x0, binAddr: 0x0000000100000ED0, size: 0x00000050 }
+ - { sym: _private_int, objAddr: 0x0, binAddr: 0x0000000100001004, size: 0x00000000 }
+ - { sym: _inc, objAddr: 0x0, binAddr: 0x0000000100000F20, size: 0x00000017 }
+ - { sym: _baz, objAddr: 0x0, binAddr: 0x0000000100001000, size: 0x00000000 }
+ - filename: /Inputs/./libbasic.a(basic3.macho.x86_64.o)
+ symbols:
+ - { sym: _val, objAddr: 0x0, binAddr: 0x0000000100001008, size: 0x00000000 }
+ - { sym: _bar, objAddr: 0x0, binAddr: 0x0000000100000F40, size: 0x00000050 }
+ - { sym: _inc, objAddr: 0x0, binAddr: 0x0000000100000F90, size: 0x00000019 }
+...
diff --git a/test/tools/llvm-objdump/invalid-input.test b/test/tools/llvm-objdump/invalid-input.test
new file mode 100644
index 0000000000000..20a901d0b2ded
--- /dev/null
+++ b/test/tools/llvm-objdump/invalid-input.test
@@ -0,0 +1,6 @@
+RUN: not llvm-objdump -t %p/missing-file 2>&1 | FileCheck %s -check-prefix=NO_SUCH_FILE
+# Don't check the OS-dependent message "No such file or directory".
+NO_SUCH_FILE: '{{.*}}missing-file':
+
+RUN: not llvm-objdump -t %s 2>&1 | FileCheck %s -check-prefix=UNKNOWN_FILE_TYPE
+UNKNOWN_FILE_TYPE: '{{.*}}invalid-input.test': The file was not recognized as a valid object file
diff --git a/test/tools/llvm-readobj/elf-dtflags.test b/test/tools/llvm-readobj/elf-dtflags.test
index 0ed1c7adf91dd..4e6c90d9f06ba 100644
--- a/test/tools/llvm-readobj/elf-dtflags.test
+++ b/test/tools/llvm-readobj/elf-dtflags.test
@@ -1,4 +1,8 @@
// Test that llvm-readobj dumps DF_XXX and DF_1_XXX flags correctly.
+// The input was generated using the following:
+// $ clang -Wl,-z,origin -Wl,-z,now example.c
+// $ cat example.c
+// int main(void) { return (0); }
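+// (-z now is expected to set DF_BIND_NOW/DF_1_NOW and -z origin to set
+// DF_ORIGIN/DF_1_ORIGIN in the dumped dynamic table.)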
RUN: llvm-readobj -dynamic-table %p/Inputs/dtflags.elf-x86-64 | FileCheck %s
diff --git a/test/tools/llvm-readobj/sections-ext.test b/test/tools/llvm-readobj/sections-ext.test
index 4024878d2bde6..6b4a674497daf 100644
--- a/test/tools/llvm-readobj/sections-ext.test
+++ b/test/tools/llvm-readobj/sections-ext.test
@@ -110,7 +110,7 @@ ELF-NEXT: Relocations [
ELF-NEXT: ]
ELF-NEXT: Symbols [
ELF-NEXT: Symbol {
-ELF-NEXT: Name: .text (0)
+ELF-NEXT: Name: (0)
ELF-NEXT: Value: 0x0
ELF-NEXT: Size: 0
ELF-NEXT: Binding: Local (0x0)