Diffstat (limited to 'test')
-rw-r--r--  test/Analysis/CostModel/SystemZ/fp-arith.ll | 53
-rw-r--r--  test/Assembler/diimportedentity.ll | 4
-rw-r--r--  test/Bitcode/DIGlobalVariableExpression.ll | 2
-rw-r--r--  test/Bitcode/compatibility-3.6.ll | 6
-rw-r--r--  test/Bitcode/compatibility-3.7.ll | 6
-rw-r--r--  test/Bitcode/compatibility-3.8.ll | 6
-rw-r--r--  test/Bitcode/compatibility-3.9.ll | 6
-rw-r--r--  test/Bitcode/compatibility-4.0.ll | 6
-rw-r--r--  test/Bitcode/compatibility.ll | 6
-rw-r--r--  test/Bitcode/upgrade-importedentity.ll | 15
-rw-r--r--  test/Bitcode/upgrade-importedentity.ll.bc | bin 0 -> 1216 bytes
-rw-r--r--  test/CMakeLists.txt | 3
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-fma.mir | 41
-rw-r--r--  test/CodeGen/AArch64/aarch64_win64cc_vararg.ll | 74
-rw-r--r--  test/CodeGen/AArch64/arm64-abi-varargs.ll | 3
-rw-r--r--  test/CodeGen/AArch64/arm64-abi_align.ll | 32
-rw-r--r--  test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll | 6
-rw-r--r--  test/CodeGen/AArch64/arm64-extern-weak.ll | 2
-rw-r--r--  test/CodeGen/AArch64/arm64-inline-asm.ll | 10
-rw-r--r--  test/CodeGen/AArch64/arm64-platform-reg.ll | 1
-rw-r--r--  test/CodeGen/AArch64/arm64-vext.ll | 8
-rw-r--r--  test/CodeGen/AArch64/atomic-ops-lse.ll | 161
-rw-r--r--  test/CodeGen/AArch64/dag-combine-invaraints.ll | 2
-rw-r--r--  test/CodeGen/AArch64/extern-weak.ll | 2
-rw-r--r--  test/CodeGen/AArch64/falkor-hwpf-fix.ll | 67
-rw-r--r--  test/CodeGen/AArch64/falkor-hwpf-fix.mir | 52
-rw-r--r--  test/CodeGen/AArch64/falkor-hwpf.ll | 106
-rw-r--r--  test/CodeGen/AArch64/preferred-function-alignment.ll | 2
-rw-r--r--  test/CodeGen/AArch64/swifterror.ll | 12
-rw-r--r--  test/CodeGen/AArch64/win64_vararg.ll | 95
-rw-r--r--  test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll | 312
-rw-r--r--  test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll | 11
-rw-r--r--  test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/fcanonicalize-elimination.ll | 62
-rw-r--r--  test/CodeGen/AMDGPU/function-args.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/hsa.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.waitcnt.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/move-to-valu-worklist.ll | 29
-rw-r--r--  test/CodeGen/AMDGPU/mubuf-offset-private.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/parallelandifcollapse.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/parallelorifcollapse.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/private-access-no-objects.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir | 2
-rw-r--r--  test/CodeGen/AMDGPU/scratch-simple.ll | 72
-rw-r--r--  test/CodeGen/AMDGPU/sdwa-peephole-instr.mir | 35
-rw-r--r--  test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir | 4
-rw-r--r--  test/CodeGen/AMDGPU/trap.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir | 2
-rw-r--r--  test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll | 36
-rw-r--r--  test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll | 6
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir | 39
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll | 52
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-isel.ll | 39
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir | 174
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalizer.mir | 36
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir | 30
-rw-r--r--  test/CodeGen/ARM/atomic-op.ll | 15
-rw-r--r--  test/CodeGen/AVR/branch-relaxation.ll | 4
-rw-r--r--  test/CodeGen/BPF/select_ri.ll | 27
-rw-r--r--  test/CodeGen/BPF/setcc.ll | 4
-rw-r--r--  test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll | 1
-rw-r--r--  test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll | 3
-rw-r--r--  test/CodeGen/Generic/print-mul-exp.ll | 1
-rw-r--r--  test/CodeGen/Generic/print-mul.ll | 1
-rw-r--r--  test/CodeGen/Generic/print-shift.ll | 1
-rw-r--r--  test/CodeGen/Generic/v-split.ll | 3
-rw-r--r--  test/CodeGen/Generic/vector-redux.ll | 3
-rw-r--r--  test/CodeGen/Generic/vector.ll | 3
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/system_user.ll | 76
-rw-r--r--  test/CodeGen/Hexagon/switch-lut-explicit-section.ll | 32
-rw-r--r--  test/CodeGen/Hexagon/switch-lut-function-section.ll | 30
-rw-r--r--  test/CodeGen/Hexagon/switch-lut-multiple-functions.ll | 42
-rw-r--r--  test/CodeGen/Hexagon/switch-lut-text-section.ll | 27
-rw-r--r--  test/CodeGen/Hexagon/v6vec-vprint.ll | 2
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-load-v4i16.ll | 23
-rw-r--r--  test/CodeGen/Hexagon/vect/vect-v4i16.ll (renamed from test/CodeGen/Hexagon/vect/vect-loadv4i16.ll) | 0
-rw-r--r--  test/CodeGen/MIR/AArch64/target-memoperands.mir | 4
-rw-r--r--  test/CodeGen/MIR/AMDGPU/fold-multiple.mir | 40
-rw-r--r--  test/CodeGen/MSP430/vararg.ll | 4
-rw-r--r--  test/CodeGen/Mips/2008-06-05-Carry.ll | 13
-rw-r--r--  test/CodeGen/Mips/dins.ll | 4
-rw-r--r--  test/CodeGen/Mips/dsp-patterns.ll | 4
-rw-r--r--  test/CodeGen/Mips/llcarry.ll | 11
-rw-r--r--  test/CodeGen/Mips/llvm-ir/add.ll | 394
-rw-r--r--  test/CodeGen/Mips/llvm-ir/sub.ll | 174
-rw-r--r--  test/CodeGen/Mips/long-calls.ll | 57
-rw-r--r--  test/CodeGen/Mips/madd-msub.ll | 81
-rw-r--r--  test/CodeGen/Mips/msa/f16-llvm-ir.ll | 12
-rw-r--r--  test/CodeGen/PowerPC/PR33671.ll | 32
-rw-r--r--  test/CodeGen/PowerPC/build-vector-tests.ll | 40
-rw-r--r--  test/CodeGen/PowerPC/ppc64-i128-abi.ll | 6
-rw-r--r--  test/CodeGen/PowerPC/swaps-le-6.ll | 8
-rw-r--r--  test/CodeGen/PowerPC/vsx-p9.ll | 48
-rw-r--r--  test/CodeGen/SPARC/soft-mul-div.ll | 65
-rw-r--r--  test/CodeGen/SystemZ/branch-11.ll | 56
-rw-r--r--  test/CodeGen/SystemZ/fp-abs-03.ll | 43
-rw-r--r--  test/CodeGen/SystemZ/fp-abs-04.ll | 46
-rw-r--r--  test/CodeGen/SystemZ/fp-add-01.ll | 6
-rw-r--r--  test/CodeGen/SystemZ/fp-add-04.ll | 17
-rw-r--r--  test/CodeGen/SystemZ/fp-cmp-01.ll | 102
-rw-r--r--  test/CodeGen/SystemZ/fp-cmp-06.ll | 33
-rw-r--r--  test/CodeGen/SystemZ/fp-const-11.ll | 40
-rw-r--r--  test/CodeGen/SystemZ/fp-conv-15.ll | 50
-rw-r--r--  test/CodeGen/SystemZ/fp-conv-16.ll | 99
-rw-r--r--  test/CodeGen/SystemZ/fp-copysign-02.ll | 81
-rw-r--r--  test/CodeGen/SystemZ/fp-div-01.ll | 6
-rw-r--r--  test/CodeGen/SystemZ/fp-div-04.ll | 17
-rw-r--r--  test/CodeGen/SystemZ/fp-move-13.ll | 46
-rw-r--r--  test/CodeGen/SystemZ/fp-mul-01.ll | 6
-rw-r--r--  test/CodeGen/SystemZ/fp-mul-06.ll | 31
-rw-r--r--  test/CodeGen/SystemZ/fp-mul-08.ll | 31
-rw-r--r--  test/CodeGen/SystemZ/fp-mul-10.ll | 43
-rw-r--r--  test/CodeGen/SystemZ/fp-mul-11.ll | 32
-rw-r--r--  test/CodeGen/SystemZ/fp-mul-12.ll | 72
-rw-r--r--  test/CodeGen/SystemZ/fp-neg-02.ll | 41
-rw-r--r--  test/CodeGen/SystemZ/fp-round-03.ll | 207
-rw-r--r--  test/CodeGen/SystemZ/fp-sqrt-01.ll | 8
-rw-r--r--  test/CodeGen/SystemZ/fp-sqrt-04.ll | 17
-rw-r--r--  test/CodeGen/SystemZ/fp-sub-01.ll | 6
-rw-r--r--  test/CodeGen/SystemZ/fp-sub-04.ll | 17
-rw-r--r--  test/CodeGen/SystemZ/int-add-17.ll | 95
-rw-r--r--  test/CodeGen/SystemZ/int-mul-09.ll | 95
-rw-r--r--  test/CodeGen/SystemZ/int-mul-10.ll | 165
-rw-r--r--  test/CodeGen/SystemZ/int-mul-11.ll | 32
-rw-r--r--  test/CodeGen/SystemZ/int-sub-10.ll | 95
-rw-r--r--  test/CodeGen/SystemZ/tdc-07.ll | 18
-rw-r--r--  test/CodeGen/SystemZ/vec-abs-06.ll | 47
-rw-r--r--  test/CodeGen/SystemZ/vec-add-02.ll | 24
-rw-r--r--  test/CodeGen/SystemZ/vec-and-04.ll | 47
-rw-r--r--  test/CodeGen/SystemZ/vec-cmp-07.ll | 349
-rw-r--r--  test/CodeGen/SystemZ/vec-ctpop-02.ll | 45
-rw-r--r--  test/CodeGen/SystemZ/vec-div-02.ll | 24
-rw-r--r--  test/CodeGen/SystemZ/vec-intrinsics-01.ll (renamed from test/CodeGen/SystemZ/vec-intrinsics.ll) | 0
-rw-r--r--  test/CodeGen/SystemZ/vec-intrinsics-02.ll | 441
-rw-r--r--  test/CodeGen/SystemZ/vec-max-05.ll | 175
-rw-r--r--  test/CodeGen/SystemZ/vec-min-05.ll | 175
-rw-r--r--  test/CodeGen/SystemZ/vec-move-18.ll | 24
-rw-r--r--  test/CodeGen/SystemZ/vec-mul-03.ll | 24
-rw-r--r--  test/CodeGen/SystemZ/vec-mul-04.ll | 31
-rw-r--r--  test/CodeGen/SystemZ/vec-mul-05.ll | 63
-rw-r--r--  test/CodeGen/SystemZ/vec-neg-02.ll | 23
-rw-r--r--  test/CodeGen/SystemZ/vec-or-03.ll | 91
-rw-r--r--  test/CodeGen/SystemZ/vec-round-02.ll | 118
-rw-r--r--  test/CodeGen/SystemZ/vec-sqrt-02.ll | 23
-rw-r--r--  test/CodeGen/SystemZ/vec-sub-02.ll | 31
-rw-r--r--  test/CodeGen/SystemZ/vec-xor-02.ll | 47
-rw-r--r--  test/CodeGen/Thumb/litpoolremat.ll | 28
-rw-r--r--  test/CodeGen/Thumb/select.ll | 4
-rw-r--r--  test/CodeGen/WebAssembly/indirect-import.ll | 9
-rw-r--r--  test/CodeGen/WebAssembly/userstack.ll | 10
-rw-r--r--  test/CodeGen/X86/2008-01-08-SchedulerCrash.ll | 2
-rw-r--r--  test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll | 2
-rw-r--r--  test/CodeGen/X86/2011-10-19-widen_vselect.ll | 7
-rw-r--r--  test/CodeGen/X86/DynamicCalleeSavedRegisters.ll | 2
-rw-r--r--  test/CodeGen/X86/alias-static-alloca.ll | 37
-rw-r--r--  test/CodeGen/X86/atomic-minmax-i6432.ll | 16
-rw-r--r--  test/CodeGen/X86/atomic128.ll | 64
-rw-r--r--  test/CodeGen/X86/avx-schedule.ll | 508
-rw-r--r--  test/CodeGen/X86/avx2-arith.ll | 8
-rw-r--r--  test/CodeGen/X86/avx2-schedule.ll | 116
-rw-r--r--  test/CodeGen/X86/avx2-vector-shifts.ll | 4
-rw-r--r--  test/CodeGen/X86/avx512-cvt.ll | 2
-rw-r--r--  test/CodeGen/X86/avx512-mask-op.ll | 5
-rw-r--r--  test/CodeGen/X86/avx512-rotate.ll | 256
-rw-r--r--  test/CodeGen/X86/avx512-shift.ll | 148
-rw-r--r--  test/CodeGen/X86/bmi-schedule.ll | 529
-rw-r--r--  test/CodeGen/X86/bmi2-schedule.ll | 180
-rw-r--r--  test/CodeGen/X86/bool-ext-inc.ll | 8
-rw-r--r--  test/CodeGen/X86/bswap-rotate.ll | 27
-rw-r--r--  test/CodeGen/X86/clobber-fi0.ll | 14
-rw-r--r--  test/CodeGen/X86/combine-rotates.ll | 27
-rw-r--r--  test/CodeGen/X86/combine-shl.ll | 12
-rw-r--r--  test/CodeGen/X86/combine-srl.ll | 8
-rw-r--r--  test/CodeGen/X86/combine-udiv.ll | 2
-rw-r--r--  test/CodeGen/X86/combine-urem.ll | 8
-rw-r--r--  test/CodeGen/X86/f16c-schedule.ll | 144
-rw-r--r--  test/CodeGen/X86/fast-isel-x86-64.ll | 2
-rw-r--r--  test/CodeGen/X86/hipe-cc.ll | 6
-rw-r--r--  test/CodeGen/X86/hipe-cc64.ll | 6
-rw-r--r--  test/CodeGen/X86/lea32-schedule.ll | 653
-rw-r--r--  test/CodeGen/X86/lea64-schedule.ll | 534
-rw-r--r--  test/CodeGen/X86/legalize-shift-64.ll | 8
-rw-r--r--  test/CodeGen/X86/lzcnt-schedule.ll | 119
-rw-r--r--  test/CodeGen/X86/machine-outliner-debuginfo.ll | 1
-rw-r--r--  test/CodeGen/X86/machine-outliner.ll | 1
-rw-r--r--  test/CodeGen/X86/memcmp-minsize.ll | 721
-rw-r--r--  test/CodeGen/X86/memcmp-optsize.ll | 871
-rw-r--r--  test/CodeGen/X86/memcmp.ll | 827
-rw-r--r--  test/CodeGen/X86/pmul.ll | 6
-rw-r--r--  test/CodeGen/X86/popcnt-schedule.ll | 167
-rw-r--r--  test/CodeGen/X86/pr32282.ll | 104
-rw-r--r--  test/CodeGen/X86/pr32515.ll | 29
-rw-r--r--  test/CodeGen/X86/pr33772.ll | 15
-rw-r--r--  test/CodeGen/X86/pr33828.ll | 48
-rw-r--r--  test/CodeGen/X86/regparm.ll | 2
-rw-r--r--  test/CodeGen/X86/rotate_vec.ll | 54
-rw-r--r--  test/CodeGen/X86/sibcall-win64.ll | 22
-rw-r--r--  test/CodeGen/X86/sse-schedule.ll | 327
-rw-r--r--  test/CodeGen/X86/sse2-schedule.ll | 824
-rw-r--r--  test/CodeGen/X86/sse3-schedule.ll | 64
-rw-r--r--  test/CodeGen/X86/sse41-schedule.ll | 311
-rw-r--r--  test/CodeGen/X86/sse42-schedule.ll | 81
-rw-r--r--  test/CodeGen/X86/sse4a-schedule.ll | 40
-rw-r--r--  test/CodeGen/X86/ssse3-schedule.ll | 98
-rw-r--r--  test/CodeGen/X86/statepoint-invoke.ll | 2
-rw-r--r--  test/CodeGen/X86/statepoint-stack-usage.ll | 42
-rw-r--r--  test/CodeGen/X86/statepoint-vector.ll | 4
-rw-r--r--  test/CodeGen/X86/vec_cmp_uint-128.ll | 8
-rw-r--r--  test/CodeGen/X86/vector-idiv-sdiv-128.ll | 6
-rw-r--r--  test/CodeGen/X86/vector-idiv-sdiv-256.ll | 6
-rw-r--r--  test/CodeGen/X86/vector-idiv-udiv-128.ll | 6
-rw-r--r--  test/CodeGen/X86/vector-idiv-udiv-256.ll | 6
-rw-r--r--  test/CodeGen/X86/vector-idiv.ll | 2
-rw-r--r--  test/CodeGen/X86/vector-rotate-128.ll | 203
-rw-r--r--  test/CodeGen/X86/vector-rotate-256.ll | 256
-rw-r--r--  test/CodeGen/X86/vector-rotate-512.ll | 831
-rw-r--r--  test/CodeGen/X86/vector-shift-ashr-256.ll | 10
-rw-r--r--  test/CodeGen/X86/vector-tzcnt-128.ll | 4
-rw-r--r--  test/CodeGen/X86/vector-tzcnt-256.ll | 8
-rw-r--r--  test/CodeGen/X86/vector-tzcnt-512.ll | 8
-rw-r--r--  test/CodeGen/X86/vselect-avx.ll | 8
-rw-r--r--  test/CodeGen/X86/widen_arith-2.ll | 15
-rw-r--r--  test/CodeGen/X86/widen_cast-4.ll | 34
-rw-r--r--  test/CodeGen/X86/win64-nosse-csrs.ll | 2
-rw-r--r--  test/CodeGen/X86/win64_nonvol.ll | 2
-rw-r--r--  test/CodeGen/X86/win64_params.ll | 2
-rw-r--r--  test/CodeGen/X86/win_chkstk.ll | 2
-rw-r--r--  test/CodeGen/X86/win_coreclr_chkstk.ll | 4
-rw-r--r--  test/CodeGen/X86/x86-64-ms_abi-vararg.ll | 14
-rw-r--r--  test/CodeGen/X86/x86-cmov-converter.ll | 321
-rw-r--r--  test/CodeGen/XCore/varargs.ll | 8
-rw-r--r--  test/DebugInfo/Generic/namespace.ll | 41
-rw-r--r--  test/DebugInfo/PDB/pdbdump-headers.test | 184
-rw-r--r--  test/DebugInfo/X86/DIModule.ll | 2
-rw-r--r--  test/DebugInfo/X86/DIModuleContext.ll | 2
-rw-r--r--  test/DebugInfo/X86/fission-inline.ll | 2
-rw-r--r--  test/DebugInfo/X86/gnu-public-names.ll | 4
-rw-r--r--  test/DebugInfo/X86/lexical-block-file-inline.ll | 2
-rw-r--r--  test/DebugInfo/X86/pr19307.ll | 12
-rw-r--r--  test/DllTool/coff-exports.def | 13
-rw-r--r--  test/DllTool/coff-weak-exports.def | 19
-rw-r--r--  test/DllTool/lit.local.cfg | 1
-rw-r--r--  test/FileCheck/regex-scope.txt | 2
-rw-r--r--  test/Instrumentation/AddressSanitizer/basic.ll | 20
-rw-r--r--  test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll | 48
-rw-r--r--  test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll | 37
-rw-r--r--  test/Instrumentation/EfficiencySanitizer/working_set_basic.ll | 33
-rw-r--r--  test/Instrumentation/EfficiencySanitizer/working_set_slow.ll | 32
-rw-r--r--  test/Instrumentation/MemorySanitizer/msan_basic.ll | 35
-rw-r--r--  test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll | 22
-rw-r--r--  test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll | 22
-rw-r--r--  test/Linker/pr26037.ll | 4
-rw-r--r--  test/MC/AArch64/coff-relocations.s | 52
-rw-r--r--  test/MC/AArch64/invalid-instructions-spellcheck.s | 37
-rw-r--r--  test/MC/AMDGPU/gfx9_asm_all.s | 351
-rw-r--r--  test/MC/AMDGPU/vop3-errs.s | 38
-rw-r--r--  test/MC/ARM/virtexts-thumb.s | 2
-rw-r--r--  test/MC/Disassembler/AMDGPU/gfx9_dasm_all.txt | 405
-rw-r--r--  test/MC/Disassembler/Mips/mt/valid-r2-el.txt | 21
-rw-r--r--  test/MC/Disassembler/Mips/mt/valid-r2.txt | 21
-rw-r--r--  test/MC/Disassembler/SystemZ/insns-z14.txt | 3253
-rw-r--r--  test/MC/Mips/mt/invalid-wrong-error.s | 3
-rw-r--r--  test/MC/Mips/mt/invalid.s | 38
-rw-r--r--  test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s | 18
-rw-r--r--  test/MC/Mips/mt/mftr-mttr-aliases-invalid.s | 23
-rw-r--r--  test/MC/Mips/mt/mftr-mttr-aliases.s | 47
-rw-r--r--  test/MC/Mips/mt/mftr-mttr-reserved-valid.s | 8
-rw-r--r--  test/MC/Mips/mt/valid.s | 42
-rw-r--r--  test/MC/SystemZ/insn-bad-z13.s | 705
-rw-r--r--  test/MC/SystemZ/insn-bad-z14.s | 752
-rw-r--r--  test/MC/SystemZ/insn-good-z14.s | 2674
-rw-r--r--  test/MC/SystemZ/invalid-instructions-spellcheck.s | 66
-rw-r--r--  test/MC/X86/pr22028.s | 4
-rw-r--r--  test/Object/no-section-table.test | 2
-rw-r--r--  test/Object/readobj-shared-object.test | 12
-rw-r--r--  test/ObjectYAML/CodeView/guid.yaml | 59
-rw-r--r--  test/Other/cgscc-libcall-update.ll | 61
-rw-r--r--  test/Other/new-pass-manager.ll | 2
-rw-r--r--  test/ThinLTO/X86/debuginfo-cu-import.ll | 4
-rw-r--r--  test/Transforms/CodeGenPrepare/X86/memcmp.ll | 1635
-rw-r--r--  test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll | 35
-rw-r--r--  test/Transforms/EarlyCSE/globalsaa-memoryssa.ll | 25
-rw-r--r--  test/Transforms/GVN/PRE/2017-06-28-pre-load-dbgloc.ll | 79
-rw-r--r--  test/Transforms/GVN/PRE/phi-translate.ll | 4
-rw-r--r--  test/Transforms/GlobalOpt/pr33686.ll | 17
-rw-r--r--  test/Transforms/IRCE/eq_ne.ll | 257
-rw-r--r--  test/Transforms/IRCE/pre_post_loops.ll | 117
-rw-r--r--  test/Transforms/Inline/AArch64/ext.ll | 249
-rw-r--r--  test/Transforms/Inline/PowerPC/ext.ll | 140
-rw-r--r--  test/Transforms/Inline/PowerPC/lit.local.cfg | 3
-rw-r--r--  test/Transforms/Inline/X86/ext.ll | 201
-rw-r--r--  test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll | 24
-rw-r--r--  test/Transforms/InstCombine/and-not-or.ll | 34
-rw-r--r--  test/Transforms/InstCombine/and.ll | 192
-rw-r--r--  test/Transforms/InstCombine/and2.ll | 85
-rw-r--r--  test/Transforms/InstCombine/element-atomic-memintrins.ll | 98
-rw-r--r--  test/Transforms/InstCombine/icmp-logical.ll | 165
-rw-r--r--  test/Transforms/InstCombine/or-xor.ll | 28
-rw-r--r--  test/Transforms/InstCombine/or.ll | 291
-rw-r--r--  test/Transforms/InstCombine/pr33765.ll | 32
-rw-r--r--  test/Transforms/JumpThreading/select.ll | 77
-rw-r--r--  test/Transforms/LoopInterchange/current-limitations-lcssa.ll | 76
-rw-r--r--  test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll | 118
-rw-r--r--  test/Transforms/LoopInterchange/interchange-not-profitable.ll | 66
-rw-r--r--  test/Transforms/LoopInterchange/interchange-output-dependencies.ll | 86
-rw-r--r--  test/Transforms/LoopInterchange/interchange-simple-count-down.ll | 69
-rw-r--r--  test/Transforms/LoopInterchange/interchange-simple-count-up.ll | 86
-rw-r--r--  test/Transforms/LoopInterchange/interchange.ll | 749
-rw-r--r--  test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll | 220
-rw-r--r--  test/Transforms/LoopInterchange/not-interchanged-dependencies-1.ll | 64
-rw-r--r--  test/Transforms/LoopInterchange/not-interchanged-loop-nest-3.ll | 87
-rw-r--r--  test/Transforms/LoopInterchange/not-interchanged-tightly-nested.ll | 143
-rw-r--r--  test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll | 126
-rw-r--r--  test/Transforms/LoopVectorize/X86/float-induction-x86.ll | 6
-rw-r--r--  test/Transforms/LoopVectorize/debugloc.ll | 2
-rw-r--r--  test/Transforms/LoopVectorize/first-order-recurrence.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/float-induction.ll | 14
-rw-r--r--  test/Transforms/LoopVectorize/if-conversion-nest.ll | 25
-rw-r--r--  test/Transforms/LoopVectorize/induction-step.ll | 4
-rw-r--r--  test/Transforms/LoopVectorize/induction.ll | 4
-rw-r--r--  test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll | 6
-rw-r--r--  test/Transforms/LoopVectorize/interleaved-accesses.ll | 10
-rw-r--r--  test/Transforms/LoopVectorize/iv_outside_user.ll | 2
-rw-r--r--  test/Transforms/LoopVectorize/miniters.ll | 4
-rw-r--r--  test/Transforms/LoopVectorize/pr30654-phiscev-sext-trunc.ll | 240
-rw-r--r--  test/Transforms/LoopVectorize/runtime-check-readonly.ll | 1
-rw-r--r--  test/Transforms/LoopVectorize/runtime-check.ll | 2
-rw-r--r--  test/tools/llvm-cov/showTabsHTML.cpp | 4
-rw-r--r--  test/tools/llvm-dwarfdump/X86/verify_debug_info.s | 193
-rw-r--r--  test/tools/llvm-dwarfdump/X86/verify_unit_header_chain.s | 81
-rw-r--r--  test/tools/llvm-mt/help.test | 7
-rw-r--r--  test/tools/llvm-objdump/AArch64/Inputs/reloc-addend.obj.macho-aarch64 | bin 0 -> 424 bytes
-rw-r--r--  test/tools/llvm-objdump/AArch64/macho-reloc-addend.test | 6
-rw-r--r--  test/tools/llvm-readobj/Inputs/dynamic-table-so.x86 | bin 8280 -> 8256 bytes
-rw-r--r--  test/tools/llvm-readobj/Inputs/dynamic-table.c | 4
-rw-r--r--  test/tools/llvm-readobj/dynamic.test | 39
-rw-r--r--  test/tools/llvm-readobj/gnu-sections.test | 10
340 files changed, 28963 insertions, 3154 deletions
diff --git a/test/Analysis/CostModel/SystemZ/fp-arith.ll b/test/Analysis/CostModel/SystemZ/fp-arith.ll
index 08a7c291138f0..5f92db1ababf5 100644
--- a/test/Analysis/CostModel/SystemZ/fp-arith.ll
+++ b/test/Analysis/CostModel/SystemZ/fp-arith.ll
@@ -1,4 +1,7 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-Z13 %s
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-Z14 %s
;
; Note: The scalarized vector instructions cost is not including any
; extracts, due to the undef operands
@@ -21,13 +24,17 @@ define void @fadd() {
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fadd float undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fadd double undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fadd fp128 undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fadd <2 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fadd <2 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res3 = fadd <2 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fadd <2 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fadd <4 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fadd <4 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res5 = fadd <4 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fadd <4 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fadd <8 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fadd <8 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 2 for instruction: %res7 = fadd <8 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fadd <8 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fadd <16 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fadd <16 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 4 for instruction: %res9 = fadd <16 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fadd <16 x double> undef, undef
ret void;
@@ -49,13 +56,17 @@ define void @fsub() {
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fsub float undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fsub double undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fsub fp128 undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fsub <2 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fsub <2 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res3 = fsub <2 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fsub <2 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fsub <4 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fsub <4 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res5 = fsub <4 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fsub <4 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fsub <8 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fsub <8 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 2 for instruction: %res7 = fsub <8 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fsub <8 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fsub <16 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fsub <16 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 4 for instruction: %res9 = fsub <16 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fsub <16 x double> undef, undef
ret void;
@@ -77,13 +88,17 @@ define void @fmul() {
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fmul float undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fmul double undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fmul fp128 undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fmul <2 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fmul <2 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res3 = fmul <2 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fmul <2 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fmul <4 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fmul <4 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res5 = fmul <4 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fmul <4 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fmul <8 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fmul <8 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 2 for instruction: %res7 = fmul <8 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fmul <8 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fmul <16 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fmul <16 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 4 for instruction: %res9 = fmul <16 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fmul <16 x double> undef, undef
ret void;
@@ -105,13 +120,17 @@ define void @fdiv() {
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fdiv float undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fdiv double undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fdiv fp128 undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fdiv <2 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fdiv <2 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res3 = fdiv <2 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fdiv <2 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fdiv <4 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fdiv <4 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 1 for instruction: %res5 = fdiv <4 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fdiv <4 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fdiv <8 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fdiv <8 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 2 for instruction: %res7 = fdiv <8 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fdiv <8 x double> undef, undef
-; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fdiv <16 x float> undef, undef
+; CHECK-Z13: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fdiv <16 x float> undef, undef
+; CHECK-Z14: Cost Model: Found an estimated cost of 4 for instruction: %res9 = fdiv <16 x float> undef, undef
; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fdiv <16 x double> undef, undef
ret void;
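
The CHECK-Z13/CHECK-Z14 split above reflects z14's vector-enhancements facility, which adds single-precision vector FP arithmetic: on z13 a <4 x float> operation is scalarized (modeled as cost 8), while on z14 it maps onto one 128-bit vector instruction. Reading the checked numbers, the z14 cost is simply the number of 128-bit vector registers the type spans:

    cost_z14(<N x float>) = max(1, N/4)   ; 1, 1, 2, 4 for N = 2, 4, 8, 16

while the z13 costs (8, 8, 16, 32) track the scalarized expansion. This reading is inferred from the checked costs, not stated in the test itself.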
diff --git a/test/Assembler/diimportedentity.ll b/test/Assembler/diimportedentity.ll
index bc85ca09f6582..6a0e1eb931c1a 100644
--- a/test/Assembler/diimportedentity.ll
+++ b/test/Assembler/diimportedentity.ll
@@ -18,9 +18,9 @@
; CHECK: !3 = !DICompositeType({{.*}})
!3 = !DICompositeType(tag: DW_TAG_structure_type, name: "Class", size: 32, align: 32)
-; CHECK-NEXT: !4 = !DIImportedEntity(tag: DW_TAG_imported_module, name: "foo", scope: !0, entity: !1, line: 7)
+; CHECK-NEXT: !4 = !DIImportedEntity(tag: DW_TAG_imported_module, name: "foo", scope: !0, entity: !1, file: !2, line: 7)
!4 = !DIImportedEntity(tag: DW_TAG_imported_module, name: "foo", scope: !0,
- entity: !1, line: 7)
+ entity: !1, file: !2, line: 7)
; CHECK-NEXT: !5 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !0)
!5 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !0)
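
This test now exercises the optional file: field on DIImportedEntity, which records the source location of the using-declaration itself (here file !2, line 7); previously a line: could be attached without any file, which made the location effectively meaningless.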
diff --git a/test/Bitcode/DIGlobalVariableExpression.ll b/test/Bitcode/DIGlobalVariableExpression.ll
index 31c3fda1b00ad..3cf082472829e 100644
--- a/test/Bitcode/DIGlobalVariableExpression.ll
+++ b/test/Bitcode/DIGlobalVariableExpression.ll
@@ -36,4 +36,4 @@
!9 = !{!"clang version 4.0.0 (trunk 286129) (llvm/trunk 286128)"}
!10 = distinct !DIGlobalVariable(name: "c", scope: !1, file: !2, line: 1, type: !5, isLocal: false, isDefinition: true, expr: !DIExpression(DW_OP_constu, 23, DW_OP_stack_value))
!11 = distinct !DIGlobalVariable(name: "h", scope: !1, file: !2, line: 2, type: !5, isLocal: false, isDefinition: true)
-!12 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 1, scope: !1, entity: !11)
+!12 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !2, line: 1, scope: !1, entity: !11)
diff --git a/test/Bitcode/compatibility-3.6.ll b/test/Bitcode/compatibility-3.6.ll
index cf6c30e7c26c1..e9313dfba870e 100644
--- a/test/Bitcode/compatibility-3.6.ll
+++ b/test/Bitcode/compatibility-3.6.ll
@@ -368,9 +368,9 @@ declare cc78 void @f.cc78()
declare x86_64_sysvcc void @f.x86_64_sysvcc()
; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
declare cc80 void @f.cc80()
; CHECK: declare x86_vectorcallcc void @f.cc80()
declare x86_vectorcallcc void @f.x86_vectorcallcc()
diff --git a/test/Bitcode/compatibility-3.7.ll b/test/Bitcode/compatibility-3.7.ll
index 180dad258b682..82fc99055357a 100644
--- a/test/Bitcode/compatibility-3.7.ll
+++ b/test/Bitcode/compatibility-3.7.ll
@@ -368,9 +368,9 @@ declare cc78 void @f.cc78()
declare x86_64_sysvcc void @f.x86_64_sysvcc()
; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
declare cc80 void @f.cc80()
; CHECK: declare x86_vectorcallcc void @f.cc80()
declare x86_vectorcallcc void @f.x86_vectorcallcc()
diff --git a/test/Bitcode/compatibility-3.8.ll b/test/Bitcode/compatibility-3.8.ll
index 370c7f51a2b7d..2e70a380d10ed 100644
--- a/test/Bitcode/compatibility-3.8.ll
+++ b/test/Bitcode/compatibility-3.8.ll
@@ -393,9 +393,9 @@ declare cc78 void @f.cc78()
declare x86_64_sysvcc void @f.x86_64_sysvcc()
; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
declare cc80 void @f.cc80()
; CHECK: declare x86_vectorcallcc void @f.cc80()
declare x86_vectorcallcc void @f.x86_vectorcallcc()
diff --git a/test/Bitcode/compatibility-3.9.ll b/test/Bitcode/compatibility-3.9.ll
index 4115cbd8fe64d..7c84daa7d3c44 100644
--- a/test/Bitcode/compatibility-3.9.ll
+++ b/test/Bitcode/compatibility-3.9.ll
@@ -422,9 +422,9 @@ declare cc78 void @f.cc78()
declare x86_64_sysvcc void @f.x86_64_sysvcc()
; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
declare cc80 void @f.cc80()
; CHECK: declare x86_vectorcallcc void @f.cc80()
declare x86_vectorcallcc void @f.x86_vectorcallcc()
diff --git a/test/Bitcode/compatibility-4.0.ll b/test/Bitcode/compatibility-4.0.ll
index eef925564ecbf..9e34d48c95f76 100644
--- a/test/Bitcode/compatibility-4.0.ll
+++ b/test/Bitcode/compatibility-4.0.ll
@@ -422,9 +422,9 @@ declare cc78 void @f.cc78()
declare x86_64_sysvcc void @f.x86_64_sysvcc()
; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.x86_64_win64cc()
declare cc80 void @f.cc80()
; CHECK: declare x86_vectorcallcc void @f.cc80()
declare x86_vectorcallcc void @f.x86_vectorcallcc()
diff --git a/test/Bitcode/compatibility.ll b/test/Bitcode/compatibility.ll
index ebd727ba9aeee..7df1535a69231 100644
--- a/test/Bitcode/compatibility.ll
+++ b/test/Bitcode/compatibility.ll
@@ -425,9 +425,9 @@ declare cc78 void @f.cc78()
declare x86_64_sysvcc void @f.x86_64_sysvcc()
; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
declare cc79 void @f.cc79()
-; CHECK: declare x86_64_win64cc void @f.cc79()
-declare x86_64_win64cc void @f.x86_64_win64cc()
-; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare win64cc void @f.cc79()
+declare win64cc void @f.win64cc()
+; CHECK: declare win64cc void @f.win64cc()
declare cc80 void @f.cc80()
; CHECK: declare x86_vectorcallcc void @f.cc80()
declare x86_vectorcallcc void @f.x86_vectorcallcc()
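
The identical hunk repeated across compatibility-3.6.ll through compatibility.ll tracks the rename of the x86_64_win64cc calling-convention keyword to the target-neutral win64cc (still numbered cc79): old IR and old bitcode keep parsing, but now round-trip through the new spelling, which is exactly what these CHECK lines pin down.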
diff --git a/test/Bitcode/upgrade-importedentity.ll b/test/Bitcode/upgrade-importedentity.ll
new file mode 100644
index 0000000000000..134ccf1f3eaf5
--- /dev/null
+++ b/test/Bitcode/upgrade-importedentity.ll
@@ -0,0 +1,15 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+; RUN: verify-uselistorder < %s.bc
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 308185) (llvm/trunk 308186)", emissionKind: FullDebug, imports: !3)
+!1 = !DIFile(filename: "using.ii", directory: "/")
+!3 = !{!4}
+!4 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !5, entity: !8, line: 301)
+; CHECK: !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !4, entity: !5)
+!5 = !DINamespace(name: "M", scope: null)
+!8 = !DINamespace(name: "N", scope: null)
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 3}
diff --git a/test/Bitcode/upgrade-importedentity.ll.bc b/test/Bitcode/upgrade-importedentity.ll.bc
new file mode 100644
index 0000000000000..7fa833b504627
--- /dev/null
+++ b/test/Bitcode/upgrade-importedentity.ll.bc
Binary files differ
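
The .ll/.ll.bc pair follows the usual bitcode auto-upgrade test pattern: the checked-in binary was produced by an older LLVM, and llvm-dis must upgrade it on load. Per the CHECK line above, a DIImportedEntity carrying line: 301 but no file: loses the line number during the upgrade, consistent with the file: field introduced in the Assembler test earlier in this change.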
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index b52b6018e0263..124f0c72fd758 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -48,6 +48,7 @@ set(LLVM_TEST_DEPENDS
llvm-cvtres
llvm-diff
llvm-dis
+ llvm-dlltool
llvm-dsymutil
llvm-dwarfdump
llvm-dwp
@@ -58,6 +59,7 @@ set(LLVM_TEST_DEPENDS
llvm-mc
llvm-mcmarkup
llvm-modextract
+ llvm-mt
llvm-nm
llvm-objdump
llvm-opt-report
@@ -65,6 +67,7 @@ set(LLVM_TEST_DEPENDS
llvm-profdata
llvm-ranlib
llvm-readobj
+ llvm-readelf
llvm-rtdyld
llvm-size
llvm-split
diff --git a/test/CodeGen/AArch64/GlobalISel/select-fma.mir b/test/CodeGen/AArch64/GlobalISel/select-fma.mir
new file mode 100644
index 0000000000000..3b2f3746b5877
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-fma.mir
@@ -0,0 +1,41 @@
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @FMADDSrrr_fpr() { ret void }
+...
+
+---
+# CHECK-LABEL: name: FMADDSrrr_fpr
+name: FMADDSrrr_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: fpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: fpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: fpr32, preferred-register: '' }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+ - { id: 3, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = COPY %w2
+# CHECK: %3 = FMADDSrrr %0, %1, %2
+body: |
+ bb.0:
+ liveins: %w0, %w1, %w2
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = COPY %w2
+ %3(s32) = G_FMA %0, %1, %2
+ %x0 = COPY %3
+...
+
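
This new GlobalISel test pins down instruction selection for the generic G_FMA: three s32 values on the fpr bank select into a single FMADDSrrr, with the generic fpr operands constrained down to the fpr32 register class, as the expected registers block spells out.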
diff --git a/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll b/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
new file mode 100644
index 0000000000000..2546e7c90ce55
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+
+define win64cc void @pass_va(i32 %count, ...) nounwind {
+entry:
+; CHECK: sub sp, sp, #80
+; CHECK: add x8, sp, #24
+; CHECK: add x0, sp, #24
+; CHECK: stp x6, x7, [sp, #64]
+; CHECK: stp x4, x5, [sp, #48]
+; CHECK: stp x2, x3, [sp, #32]
+; CHECK: str x1, [sp, #24]
+; CHECK: stp x30, x8, [sp]
+; CHECK: bl other_func
+; CHECK: ldr x30, [sp], #80
+; CHECK: ret
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ call void @other_func(i8* %ap2)
+ ret void
+}
+
+declare void @other_func(i8*) local_unnamed_addr
+
+declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_copy(i8*, i8*) nounwind
+
+; CHECK-LABEL: f9:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #24
+; CHECK: add x0, sp, #24
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #16
+; CHECK: ret
+define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ ret i8* %ap2
+}
+
+; CHECK-LABEL: f8:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #16
+; CHECK: add x0, sp, #16
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #16
+; CHECK: ret
+define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ ret i8* %ap2
+}
+
+; CHECK-LABEL: f7:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #8
+; CHECK: add x0, sp, #8
+; CHECK: stp x8, x7, [sp], #16
+; CHECK: ret
+define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ ret i8* %ap2
+}
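
Under the Windows ARM64 variadic convention tested here, the callee home-saves the unnamed-argument registers into one contiguous area so that a va_list can be a single pointer. Reconstructing pass_va's frame from the CHECK offsets: x1 at sp+24, x2/x3 at sp+32, x4/x5 at sp+48, x6/x7 at sp+64, with x30 and the computed va_list pointer (x8 = sp+24) at sp+0. va_start then simply stores sp+24, the address of the first anonymous argument; f9, f8, and f7 repeat the scheme as progressively fewer registers remain for the varargs.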
diff --git a/test/CodeGen/AArch64/arm64-abi-varargs.ll b/test/CodeGen/AArch64/arm64-abi-varargs.ll
index 0a79655714806..64a6b9b6b2109 100644
--- a/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -11,9 +11,8 @@ define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7,
; CHECK: add {{x[0-9]+}}, [[ARGS]], #8
; First vararg
; CHECK: ldr {{w[0-9]+}}, [sp, #72]
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8
; Second vararg
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}], #8
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8
; Third vararg
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
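
The updated expectation folds the separate pointer increment into a post-indexed load: ldr {{w[0-9]+}}, [{{x[0-9]+}}], #8 both fetches the second vararg and advances the va_list pointer, replacing the earlier ldr-plus-add pair.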
diff --git a/test/CodeGen/AArch64/arm64-abi_align.ll b/test/CodeGen/AArch64/arm64-abi_align.ll
index b2ea9ad3b4a1c..b844aab5628c6 100644
--- a/test/CodeGen/AArch64/arm64-abi_align.ll
+++ b/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -280,10 +280,10 @@ entry:
define i32 @caller42() #3 {
entry:
; CHECK-LABEL: caller42
-; CHECK: str {{x[0-9]+}}, [sp, #48]
-; CHECK: str {{q[0-9]+}}, [sp, #32]
-; CHECK: str {{x[0-9]+}}, [sp, #16]
-; CHECK: str {{q[0-9]+}}, [sp]
+; CHECK-DAG: str {{x[0-9]+}}, [sp, #48]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #32]
+; CHECK-DAG: str {{x[0-9]+}}, [sp, #16]
+; CHECK-DAG: str {{q[0-9]+}}, [sp]
; CHECK: add x1, sp, #32
; CHECK: mov x2, sp
; Space for s1 is allocated at sp+32
@@ -318,10 +318,10 @@ entry:
; CHECK-LABEL: caller42_stack
; CHECK: sub sp, sp, #112
; CHECK: add x29, sp, #96
-; CHECK: stur {{x[0-9]+}}, [x29, #-16]
-; CHECK: stur {{q[0-9]+}}, [x29, #-32]
-; CHECK: str {{x[0-9]+}}, [sp, #48]
-; CHECK: str {{q[0-9]+}}, [sp, #32]
+; CHECK-DAG: stur {{x[0-9]+}}, [x29, #-16]
+; CHECK-DAG: stur {{q[0-9]+}}, [x29, #-32]
+; CHECK-DAG: str {{x[0-9]+}}, [sp, #48]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #32]
; Space for s1 is allocated at x29-32 = sp+64
; Space for s2 is allocated at sp+32
; CHECK: add x[[B:[0-9]+]], sp, #32
@@ -388,10 +388,10 @@ entry:
define i32 @caller43() #3 {
entry:
; CHECK-LABEL: caller43
-; CHECK: str {{q[0-9]+}}, [sp, #48]
-; CHECK: str {{q[0-9]+}}, [sp, #32]
-; CHECK: str {{q[0-9]+}}, [sp, #16]
-; CHECK: str {{q[0-9]+}}, [sp]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #48]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #32]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #16]
+; CHECK-DAG: str {{q[0-9]+}}, [sp]
; CHECK: add x1, sp, #32
; CHECK: mov x2, sp
; Space for s1 is allocated at sp+32
@@ -430,10 +430,10 @@ entry:
; CHECK-LABEL: caller43_stack
; CHECK: sub sp, sp, #112
; CHECK: add x29, sp, #96
-; CHECK: stur {{q[0-9]+}}, [x29, #-16]
-; CHECK: stur {{q[0-9]+}}, [x29, #-32]
-; CHECK: str {{q[0-9]+}}, [sp, #48]
-; CHECK: str {{q[0-9]+}}, [sp, #32]
+; CHECK-DAG: stur {{q[0-9]+}}, [x29, #-16]
+; CHECK-DAG: stur {{q[0-9]+}}, [x29, #-32]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #48]
+; CHECK-DAG: str {{q[0-9]+}}, [sp, #32]
; Space for s1 is allocated at x29-32 = sp+64
; Space for s2 is allocated at sp+32
; CHECK: add x[[B:[0-9]+]], sp, #32
diff --git a/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll b/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
index a3b740df9b4ee..fdb379871048d 100644
--- a/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
+++ b/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
@@ -1,10 +1,8 @@
; RUN: llc -mtriple=arm64-eabi -mcpu=cyclone < %s | FileCheck %s
; CHECK: foo
-; CHECK: str w[[REG0:[0-9]+]], [x19, #264]
-; CHECK: mov w[[REG1:[0-9]+]], w[[REG0]]
-; CHECK: str w[[REG1]], [x19, #132]
-
+; CHECK-DAG: str w[[REG0:[0-9]+]], [x19, #132]
+; CHECK-DAG: str w[[REG0]], [x19, #264]
define i32 @foo(i32 %a) nounwind {
%retval = alloca i32, align 4
%a.addr = alloca i32, align 4
diff --git a/test/CodeGen/AArch64/arm64-extern-weak.ll b/test/CodeGen/AArch64/arm64-extern-weak.ll
index 990782cb69a00..c98bda0d01a0e 100644
--- a/test/CodeGen/AArch64/arm64-extern-weak.ll
+++ b/test/CodeGen/AArch64/arm64-extern-weak.ll
@@ -1,5 +1,5 @@
; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -o - < %s | FileCheck %s
-; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck --check-prefix=CHECK %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck %s
; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
declare extern_weak i32 @var()
diff --git a/test/CodeGen/AArch64/arm64-inline-asm.ll b/test/CodeGen/AArch64/arm64-inline-asm.ll
index f849df2a51ec2..848b87fd2cfb1 100644
--- a/test/CodeGen/AArch64/arm64-inline-asm.ll
+++ b/test/CodeGen/AArch64/arm64-inline-asm.ll
@@ -261,3 +261,13 @@ define void @test_inline_modifier_a(i8* %ptr) nounwind {
; CHECK: prfm pldl1keep, [x0]
ret void
}
+
+; PR33134
+define void @test_zero_address() {
+entry:
+; CHECK-LABEL: test_zero_address
+; CHECK: mov {{x[0-9]+}}, xzr
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}]
+ tail call i32 asm sideeffect "ldr $0, $1 \0A", "=r,*Q"(i32* null)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-platform-reg.ll b/test/CodeGen/AArch64/arm64-platform-reg.ll
index f3af01a73559f..9b5d8a890fa6f 100644
--- a/test/CodeGen/AArch64/arm64-platform-reg.ll
+++ b/test/CodeGen/AArch64/arm64-platform-reg.ll
@@ -1,6 +1,7 @@
; RUN: llc -mtriple=arm64-apple-ios -mattr=+reserve-x18 -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE-X18
; RUN: llc -mtriple=arm64-freebsd-gnu -mattr=+reserve-x18 -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE-X18
; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-windows -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE-X18
; x18 is reserved as a platform register on Darwin but not on other
; systems. Create loads of register pressure and make sure this is respected.
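
The added aarch64-windows RUN line reuses the CHECK-RESERVE-X18 expectations because Windows, like Darwin, treats x18 as a reserved platform register (it holds the TEB pointer there), so even under heavy register pressure it must never be allocated.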
diff --git a/test/CodeGen/AArch64/arm64-vext.ll b/test/CodeGen/AArch64/arm64-vext.ll
index b315e4c409b03..c1edf1b2e9bfa 100644
--- a/test/CodeGen/AArch64/arm64-vext.ll
+++ b/test/CodeGen/AArch64/arm64-vext.ll
@@ -116,7 +116,7 @@ define void @test_vext_p16() nounwind ssp {
define void @test_vext_s32() nounwind ssp {
; CHECK-LABEL: test_vext_s32:
- ; CHECK: {{ext.8.*#4}}
+ ; CHECK: {{rev64.2s.*}}
%xS32x2 = alloca <2 x i32>, align 8
%__a = alloca <2 x i32>, align 8
%__b = alloca <2 x i32>, align 8
@@ -137,7 +137,7 @@ define void @test_vext_s32() nounwind ssp {
define void @test_vext_u32() nounwind ssp {
; CHECK-LABEL: test_vext_u32:
- ; CHECK: {{ext.8.*#4}}
+ ; CHECK: {{rev64.2s.*}}
%xU32x2 = alloca <2 x i32>, align 8
%__a = alloca <2 x i32>, align 8
%__b = alloca <2 x i32>, align 8
@@ -158,7 +158,7 @@ define void @test_vext_u32() nounwind ssp {
define void @test_vext_f32() nounwind ssp {
; CHECK-LABEL: test_vext_f32:
- ; CHECK: {{ext.8.*#4}}
+ ; CHECK: {{rev64.2s.*}}
%xF32x2 = alloca <2 x float>, align 8
%__a = alloca <2 x float>, align 8
%__b = alloca <2 x float>, align 8
@@ -179,7 +179,7 @@ define void @test_vext_f32() nounwind ssp {
define void @test_vext_s64() nounwind ssp {
; CHECK-LABEL: test_vext_s64:
- ; CHECK_FIXME: {{ext.8.*#1}}
+ ; CHECK_FIXME: {{rev64.2s.*}}
; this just turns into a load of the second element
%xS64x1 = alloca <1 x i64>, align 8
%__a = alloca <1 x i64>, align 8
diff --git a/test/CodeGen/AArch64/atomic-ops-lse.ll b/test/CodeGen/AArch64/atomic-ops-lse.ll
index a85eb6b46aff2..a0c418bff5734 100644
--- a/test/CodeGen/AArch64/atomic-ops-lse.ll
+++ b/test/CodeGen/AArch64/atomic-ops-lse.ll
@@ -681,3 +681,164 @@ define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
ret i64 %old
}
+define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i8:
+ %old = atomicrmw sub i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
+
+; CHECK: ldaddalb w[[NEG]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i16:
+ %old = atomicrmw sub i16* @var16, i16 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
+
+; CHECK: ldaddalh w[[NEG]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i32:
+ %old = atomicrmw sub i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
+
+; CHECK: ldaddal w[[NEG]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i64:
+ %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
+
+; CHECK: ldaddal x[[NEG]], x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+ ret i64 %old
+}
+
+define void @test_atomic_load_sub_i32_noret(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i32_noret:
+ atomicrmw sub i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
+
+; CHECK: ldaddal w[[NEG]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+ ret void
+}
+
+define void @test_atomic_load_sub_i64_noret(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i64_noret:
+ atomicrmw sub i64* @var64, i64 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
+
+; CHECK: ldaddal x[[NEG]], x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+
+ ret void
+}
+
+define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i8:
+ %old = atomicrmw and i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
+
+; CHECK: ldclralb w[[NOT]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i16:
+ %old = atomicrmw and i16* @var16, i16 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
+
+; CHECK: ldclralh w[[NOT]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i32:
+ %old = atomicrmw and i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
+
+; CHECK: ldclral w[[NOT]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i64:
+ %old = atomicrmw and i64* @var64, i64 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
+
+; CHECK: ldclral x[[NOT]], x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+ ret i64 %old
+}
+
+define void @test_atomic_load_and_i32_noret(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i32_noret:
+ atomicrmw and i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
+
+; CHECK: ldclral w[[NOT]], w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+ ret void
+}
+
+define void @test_atomic_load_and_i64_noret(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i64_noret:
+ atomicrmw and i64* @var64, i64 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
+; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
+
+; CHECK: ldclral x[[NOT]], x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK-NOT: dmb
+ ret void
+}
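
LSE provides atomic add (ldaddal*) and atomic bit-clear (ldclral*) but no direct subtract or AND, so these tests verify the algebraic rewrites the backend performs on the operand first:

    x - y == x + (-y)      -> neg, then ldaddal
    x & y == x & ~(~y)     -> mvn, then ldclral

The surrounding CHECK-NOT: dmb lines additionally pin down that no separate barriers are emitted; the sequentially consistent ordering is carried by the AL-suffixed instructions themselves.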
diff --git a/test/CodeGen/AArch64/dag-combine-invaraints.ll b/test/CodeGen/AArch64/dag-combine-invaraints.ll
index 20ba3fea83779..a2fa1db8a8ace 100644
--- a/test/CodeGen/AArch64/dag-combine-invaraints.ll
+++ b/test/CodeGen/AArch64/dag-combine-invaraints.ll
@@ -9,7 +9,7 @@ main_:
%i32T = alloca i32, align 4
%i32F = alloca i32, align 4
%i32X = alloca i32, align 4
- store i32 0, i32* %tmp
+ store i32 %argc, i32* %tmp
store i32 15, i32* %i32T, align 4
store i32 5, i32* %i32F, align 4
%tmp6 = load i32, i32* %tmp, align 4
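
Storing %argc rather than the constant 0 presumably keeps the loaded %tmp6 opaque: with a known zero stored to %tmp, later folds could evaluate the comparison chain away and delete the very DAG pattern this test exists to cover.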
diff --git a/test/CodeGen/AArch64/extern-weak.ll b/test/CodeGen/AArch64/extern-weak.ll
index ac2153ad8ffed..5671a1070138d 100644
--- a/test/CodeGen/AArch64/extern-weak.ll
+++ b/test/CodeGen/AArch64/extern-weak.ll
@@ -1,5 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck --check-prefix=CHECK %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - %s | FileCheck --check-prefix=CHECK-LARGE %s
declare extern_weak i32 @var()
diff --git a/test/CodeGen/AArch64/falkor-hwpf-fix.ll b/test/CodeGen/AArch64/falkor-hwpf-fix.ll
new file mode 100644
index 0000000000000..9f2af5adce71a
--- /dev/null
+++ b/test/CodeGen/AArch64/falkor-hwpf-fix.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -mtriple aarch64 -mcpu=falkor -disable-post-ra | FileCheck %s
+
+; Check that strided load tag collisions are avoided on Falkor.
+
+; CHECK-LABEL: hwpf1:
+; CHECK: ldp {{w[0-9]+}}, {{w[0-9]+}}, [x[[BASE:[0-9]+]], #-16]
+; CHECK: mov x[[BASE2:[0-9]+]], x[[BASE]]
+; CHECK: ldp {{w[0-9]+}}, {{w[0-9]+}}, [x[[BASE2]], #-8]
+; CHECK: ldp {{w[0-9]+}}, {{w[0-9]+}}, [x[[BASE3:[0-9]+]]]
+; CHECK: mov x[[BASE4:[0-9]+]], x[[BASE3]]
+; CHECK: ldp {{w[0-9]+}}, {{w[0-9]+}}, [x[[BASE4]], #8]
+
+define void @hwpf1(i32* %p, i32* %sp, i32* %sp2, i32* %sp3, i32* %sp4) {
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
+
+ %gep = getelementptr inbounds i32, i32* %p, i32 %iv
+ %load1 = load i32, i32* %gep
+
+ %gep2 = getelementptr inbounds i32, i32* %gep, i32 1
+ %load2 = load i32, i32* %gep2
+
+ %add = add i32 %load1, %load2
+ %storegep = getelementptr inbounds i32, i32* %sp, i32 %iv
+ store i32 %add, i32* %storegep
+
+ %gep3 = getelementptr inbounds i32, i32* %gep, i32 2
+ %load3 = load i32, i32* %gep3
+
+ %gep4 = getelementptr inbounds i32, i32* %gep, i32 3
+ %load4 = load i32, i32* %gep4
+
+ %add2 = add i32 %load3, %load4
+ %storegep2 = getelementptr inbounds i32, i32* %sp2, i32 %iv
+ store i32 %add2, i32* %storegep2
+
+ %gep5 = getelementptr inbounds i32, i32* %gep, i32 4
+ %load5 = load i32, i32* %gep5
+
+ %gep6 = getelementptr inbounds i32, i32* %gep, i32 5
+ %load6 = load i32, i32* %gep6
+
+ %add3 = add i32 %load5, %load6
+ %storegep3 = getelementptr inbounds i32, i32* %sp3, i32 %iv
+ store i32 %add3, i32* %storegep3
+
+ %gep7 = getelementptr inbounds i32, i32* %gep, i32 6
+ %load7 = load i32, i32* %gep7
+
+ %gep8 = getelementptr inbounds i32, i32* %gep, i32 7
+ %load8 = load i32, i32* %gep8
+
+ %add4 = add i32 %load7, %load8
+ %storegep4 = getelementptr inbounds i32, i32* %sp4, i32 %iv
+ store i32 %add4, i32* %storegep4
+
+ %inc = add i32 %iv, 8
+ %exitcnd = icmp uge i32 %inc, 1024
+ br i1 %exitcnd, label %exit, label %loop
+
+exit:
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/falkor-hwpf-fix.mir b/test/CodeGen/AArch64/falkor-hwpf-fix.mir
new file mode 100644
index 0000000000000..54c8b16a9b439
--- /dev/null
+++ b/test/CodeGen/AArch64/falkor-hwpf-fix.mir
@@ -0,0 +1,52 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -mcpu=falkor -run-pass falkor-hwpf-fix-late -o - %s | FileCheck %s
+--- |
+ @g = external global i32
+
+ define void @hwpf1() { ret void }
+ define void @hwpf2() { ret void }
+...
+---
+# Verify that the tag collision between the loads is resolved.
+# CHECK-LABEL: name: hwpf1
+# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
+# CHECK: LDRWui %[[BASE]], 0
+# CHECK: LDRWui %x1, 1
+name: hwpf1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %w0, %x1
+
+ %w2 = LDRWui %x1, 0 :: ("aarch64-strided-access" load 4 from @g)
+ %w2 = LDRWui %x1, 1
+
+ %w0 = SUBWri %w0, 1, 0
+ %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
+ Bcc 9, %bb.0, implicit %nzcv
+
+ bb.1:
+ RET_ReallyLR
+...
+---
+# Verify that the tag collision between the loads is resolved and written back for post increment addressing.
+# CHECK-LABEL: name: hwpf2
+# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
+# CHECK: LDRWpost %[[BASE]], 0
+# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
+# CHECK: LDRWui %x1, 1
+name: hwpf2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %w0, %x1
+
+ %x1, %w2 = LDRWpost %x1, 0 :: ("aarch64-strided-access" load 4 from @g)
+ %w2 = LDRWui %x1, 1
+
+ %w0 = SUBWri %w0, 1, 0
+ %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
+ Bcc 9, %bb.0, implicit %nzcv
+
+ bb.1:
+ RET_ReallyLR
+...
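
ORRXrs %xzr, %xN, 0 is the canonical AArch64 register move (mov xD, xN is an alias for orr xD, xzr, xN), so what these CHECK lines verify is that falkor-hwpf-fix-late breaks the prefetcher tag collision by giving one of the two loads a fresh copy of the base register; the hardware prefetcher's tag is derived in part from the base register, so distinct bases yield distinct tags. In the post-increment case the pass must also copy the updated base back into %x1, hence the second ORRXrs in hwpf2.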
diff --git a/test/CodeGen/AArch64/falkor-hwpf.ll b/test/CodeGen/AArch64/falkor-hwpf.ll
new file mode 100644
index 0000000000000..bbe7febe397fa
--- /dev/null
+++ b/test/CodeGen/AArch64/falkor-hwpf.ll
@@ -0,0 +1,106 @@
+; RUN: opt < %s -S -falkor-hwpf-fix -mtriple aarch64 -mcpu=falkor | FileCheck %s
+; RUN: opt < %s -S -falkor-hwpf-fix -mtriple aarch64 -mcpu=cortex-a57 | FileCheck %s --check-prefix=NOHWPF
+
+; Check that strided access metadata is added to loads in inner loops when compiling for Falkor.
+
+; CHECK-LABEL: @hwpf1(
+; CHECK: load i32, i32* %gep, !falkor.strided.access !0
+; CHECK: load i32, i32* %gep2, !falkor.strided.access !0
+
+; NOHWPF-LABEL: @hwpf1(
+; NOHWPF: load i32, i32* %gep{{$}}
+; NOHWPF: load i32, i32* %gep2{{$}}
+define void @hwpf1(i32* %p, i32* %p2) {
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
+
+ %gep = getelementptr inbounds i32, i32* %p, i32 %iv
+ %load = load i32, i32* %gep
+
+ %gep2 = getelementptr inbounds i32, i32* %p2, i32 %iv
+ %load2 = load i32, i32* %gep2
+
+ %inc = add i32 %iv, 1
+ %exitcnd = icmp uge i32 %inc, 1024
+ br i1 %exitcnd, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Check that outer loop strided load isn't marked.
+; CHECK-LABEL: @hwpf2(
+; CHECK: load i32, i32* %gep, !falkor.strided.access !0
+; CHECK: load i32, i32* %gep2{{$}}
+
+; NOHWPF-LABEL: @hwpf2(
+; NOHWPF: load i32, i32* %gep{{$}}
+; NOHWPF: load i32, i32* %gep2{{$}}
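+; (%gep2 is strided by the outer induction variable %iv1; only loads in the
+; innermost loop are annotated.)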
+define void @hwpf2(i32* %p) {
+entry:
+ br label %loop1
+
+loop1:
+ %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
+ %outer.sum = phi i32 [ 0, %entry ], [ %sum, %loop1.latch ]
+ br label %loop2.header
+
+loop2.header:
+ br label %loop2
+
+loop2:
+ %iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
+ %sum = phi i32 [ %outer.sum, %loop2.header ], [ %sum.inc, %loop2 ]
+ %gep = getelementptr inbounds i32, i32* %p, i32 %iv2
+ %load = load i32, i32* %gep
+ %sum.inc = add i32 %sum, %load
+ %inc2 = add i32 %iv2, 1
+ %exitcnd2 = icmp uge i32 %inc2, 1024
+ br i1 %exitcnd2, label %exit2, label %loop2
+
+exit2:
+ %gep2 = getelementptr inbounds i32, i32* %p, i32 %iv1
+ %load2 = load i32, i32* %gep2
+ br label %loop1.latch
+
+loop1.latch:
+ %inc1 = add i32 %iv1, 1
+ %exitcnd1 = icmp uge i32 %inc1, 1024
+ br i1 %exitcnd1, label %exit, label %loop1
+
+exit:
+ ret void
+}
+
+; Check that non-strided load isn't marked.
+; CHECK-LABEL: @hwpf3(
+; CHECK: load i32, i32* %gep, !falkor.strided.access !0
+; CHECK: load i32, i32* %gep2{{$}}
+
+; NOHWPF-LABEL: @hwpf3(
+; NOHWPF: load i32, i32* %gep{{$}}
+; NOHWPF: load i32, i32* %gep2{{$}}
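+; (%gep2 is indexed by a loaded value rather than by the induction variable,
+; so it has no recognizable stride.)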
+define void @hwpf3(i32* %p, i32* %p2) {
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
+
+ %gep = getelementptr inbounds i32, i32* %p, i32 %iv
+ %load = load i32, i32* %gep
+
+ %gep2 = getelementptr inbounds i32, i32* %p2, i32 %load
+ %load2 = load i32, i32* %gep2
+
+ %inc = add i32 %iv, 1
+ %exitcnd = icmp uge i32 %inc, 1024
+ br i1 %exitcnd, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/preferred-function-alignment.ll b/test/CodeGen/AArch64/preferred-function-alignment.ll
index 88e6f5dd01c91..386a6ecccf542 100644
--- a/test/CodeGen/AArch64/preferred-function-alignment.ll
+++ b/test/CodeGen/AArch64/preferred-function-alignment.ll
@@ -1,7 +1,6 @@
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=generic < %s | FileCheck --check-prefix=ALIGN2 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a35 < %s | FileCheck --check-prefix=ALIGN2 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a53 < %s | FileCheck --check-prefix=ALIGN2 %s
-; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a73 < %s | FileCheck --check-prefix=ALIGN2 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cyclone < %s | FileCheck --check-prefix=ALIGN2 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=falkor < %s | FileCheck --check-prefix=ALIGN2 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=kryo < %s | FileCheck --check-prefix=ALIGN2 %s
@@ -12,6 +11,7 @@
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=thunderx2t99 < %s | FileCheck --check-prefix=ALIGN3 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a57 < %s | FileCheck --check-prefix=ALIGN4 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a72 < %s | FileCheck --check-prefix=ALIGN4 %s
+; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=cortex-a73 < %s | FileCheck --check-prefix=ALIGN4 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=exynos-m1 < %s | FileCheck --check-prefix=ALIGN4 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=exynos-m2 < %s | FileCheck --check-prefix=ALIGN4 %s
; RUN: llc -mtriple=aarch64-unknown-linux -mcpu=exynos-m3 < %s | FileCheck --check-prefix=ALIGN4 %s
diff --git a/test/CodeGen/AArch64/swifterror.ll b/test/CodeGen/AArch64/swifterror.ll
index bc28f477c8104..bcad19e391d0b 100644
--- a/test/CodeGen/AArch64/swifterror.ll
+++ b/test/CodeGen/AArch64/swifterror.ll
@@ -309,17 +309,17 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
; CHECK-APPLE-LABEL: foo_vararg:
; CHECK-APPLE: orr w0, wzr, #0x10
; CHECK-APPLE: malloc
-; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1
-; CHECK-APPLE: add [[ARGS:x[0-9]+]], [[TMP:x[0-9]+]], #16
-; CHECK-APPLE: strb [[ID]], [x0, #8]
+; CHECK-APPLE-DAG: orr [[ID:w[0-9]+]], wzr, #0x1
+; CHECK-APPLE-DAG: add [[ARGS:x[0-9]+]], [[TMP:x[0-9]+]], #16
+; CHECK-APPLE-DAG: strb [[ID]], [x0, #8]
; First vararg
; CHECK-APPLE-DAG: orr {{x[0-9]+}}, [[ARGS]], #0x8
; CHECK-APPLE-DAG: ldr {{w[0-9]+}}, [{{.*}}[[TMP]], #16]
-; CHECK-APPLE: add {{x[0-9]+}}, {{x[0-9]+}}, #8
+; CHECK-APPLE-DAG: add {{x[0-9]+}}, {{x[0-9]+}}, #8
; Second vararg
-; CHECK-APPLE: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
-; CHECK-APPLE: add {{x[0-9]+}}, {{x[0-9]+}}, #8
+; CHECK-APPLE-DAG: ldr {{w[0-9]+}}, [{{x[0-9]+}}], #8
+; CHECK-APPLE-DAG: add {{x[0-9]+}}, {{x[0-9]+}}, #16
; Third vararg
; CHECK-APPLE: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
diff --git a/test/CodeGen/AArch64/win64_vararg.ll b/test/CodeGen/AArch64/win64_vararg.ll
new file mode 100644
index 0000000000000..b760e4acd16a4
--- /dev/null
+++ b/test/CodeGen/AArch64/win64_vararg.ll
@@ -0,0 +1,95 @@
+; RUN: llc < %s -mtriple=aarch64-pc-win32 | FileCheck %s
+
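+; On win64, the unnamed register arguments (x1-x7 here) are spilled to the
+; stack so that va_list can walk all variadic arguments contiguously; the
+; stp/str sequence checked below stores them above the saved lr.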
+define void @pass_va(i32 %count, ...) nounwind {
+entry:
+; CHECK: sub sp, sp, #80
+; CHECK: add x8, sp, #24
+; CHECK: add x0, sp, #24
+; CHECK: stp x6, x7, [sp, #64]
+; CHECK: stp x4, x5, [sp, #48]
+; CHECK: stp x2, x3, [sp, #32]
+; CHECK: str x1, [sp, #24]
+; CHECK: stp x30, x8, [sp]
+; CHECK: bl other_func
+; CHECK: ldr x30, [sp], #80
+; CHECK: ret
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ call void @other_func(i8* %ap2)
+ ret void
+}
+
+declare void @other_func(i8*) local_unnamed_addr
+
+declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_copy(i8*, i8*) nounwind
+
+; CHECK-LABEL: f9:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #24
+; CHECK: add x0, sp, #24
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #16
+; CHECK: ret
+define i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ ret i8* %ap2
+}
+
+; CHECK-LABEL: f8:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #16
+; CHECK: add x0, sp, #16
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #16
+; CHECK: ret
+define i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ ret i8* %ap2
+}
+
+; CHECK-LABEL: f7:
+; CHECK: sub sp, sp, #16
+; CHECK: add x8, sp, #8
+; CHECK: add x0, sp, #8
+; CHECK: stp x8, x7, [sp], #16
+; CHECK: ret
+define i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ %ap2 = load i8*, i8** %ap, align 8
+ ret i8* %ap2
+}
+
+; CHECK-LABEL: copy1:
+; CHECK: sub sp, sp, #80
+; CHECK: add x8, sp, #24
+; CHECK: stp x6, x7, [sp, #64]
+; CHECK: stp x4, x5, [sp, #48]
+; CHECK: stp x2, x3, [sp, #32]
+; CHECK: stp x8, x1, [sp, #16]
+; CHECK: str x8, [sp, #8]
+; CHECK: add sp, sp, #80
+; CHECK: ret
+define void @copy1(i64 %a0, ...) nounwind {
+entry:
+ %ap = alloca i8*, align 8
+ %cp = alloca i8*, align 8
+ %ap1 = bitcast i8** %ap to i8*
+ %cp1 = bitcast i8** %cp to i8*
+ call void @llvm.va_start(i8* %ap1)
+ call void @llvm.va_copy(i8* %cp1, i8* %ap1)
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
new file mode 100644
index 0000000000000..e9797eff712b7
--- /dev/null
+++ b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -0,0 +1,312 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-annotate-kernel-features %s | FileCheck -check-prefix=HSA %s
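+; The annotator propagates the implicit-input attributes bottom-up through the
+; call graph: a function receives e.g. "amdgpu-work-item-id-y" if it either
+; calls the intrinsic directly or (transitively) calls a function that does.
+; The expected attribute sets are checked at the end of the file.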
+
+declare i32 @llvm.amdgcn.workgroup.id.x() #0
+declare i32 @llvm.amdgcn.workgroup.id.y() #0
+declare i32 @llvm.amdgcn.workgroup.id.z() #0
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i32 @llvm.amdgcn.workitem.id.y() #0
+declare i32 @llvm.amdgcn.workitem.id.z() #0
+
+declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #0
+declare i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #0
+declare i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr() #0
+declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #0
+declare i64 @llvm.amdgcn.dispatch.id() #0
+
+; HSA: define void @use_workitem_id_x() #1 {
+define void @use_workitem_id_x() #1 {
+ %val = call i32 @llvm.amdgcn.workitem.id.x()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_workitem_id_y() #2 {
+define void @use_workitem_id_y() #1 {
+ %val = call i32 @llvm.amdgcn.workitem.id.y()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_workitem_id_z() #3 {
+define void @use_workitem_id_z() #1 {
+ %val = call i32 @llvm.amdgcn.workitem.id.z()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_workgroup_id_x() #4 {
+define void @use_workgroup_id_x() #1 {
+ %val = call i32 @llvm.amdgcn.workgroup.id.x()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_workgroup_id_y() #5 {
+define void @use_workgroup_id_y() #1 {
+ %val = call i32 @llvm.amdgcn.workgroup.id.y()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_workgroup_id_z() #6 {
+define void @use_workgroup_id_z() #1 {
+ %val = call i32 @llvm.amdgcn.workgroup.id.z()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_dispatch_ptr() #7 {
+define void @use_dispatch_ptr() #1 {
+ %dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
+ store volatile i8 addrspace(2)* %dispatch.ptr, i8 addrspace(2)* addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_queue_ptr() #8 {
+define void @use_queue_ptr() #1 {
+ %queue.ptr = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
+ store volatile i8 addrspace(2)* %queue.ptr, i8 addrspace(2)* addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_dispatch_id() #9 {
+define void @use_dispatch_id() #1 {
+ %val = call i64 @llvm.amdgcn.dispatch.id()
+ store volatile i64 %val, i64 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @use_workgroup_id_y_workgroup_id_z() #10 {
+define void @use_workgroup_id_y_workgroup_id_z() #1 {
+ %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
+ %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
+ store volatile i32 %val0, i32 addrspace(1)* undef
+ store volatile i32 %val1, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workitem_id_x() #1 {
+define void @func_indirect_use_workitem_id_x() #1 {
+ call void @use_workitem_id_x()
+ ret void
+}
+
+; HSA: define void @kernel_indirect_use_workitem_id_x() #1 {
+define void @kernel_indirect_use_workitem_id_x() #1 {
+ call void @use_workitem_id_x()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workitem_id_y() #2 {
+define void @func_indirect_use_workitem_id_y() #1 {
+ call void @use_workitem_id_y()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workitem_id_z() #3 {
+define void @func_indirect_use_workitem_id_z() #1 {
+ call void @use_workitem_id_z()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_x() #4 {
+define void @func_indirect_use_workgroup_id_x() #1 {
+ call void @use_workgroup_id_x()
+ ret void
+}
+
+; HSA: define void @kernel_indirect_use_workgroup_id_x() #4 {
+define void @kernel_indirect_use_workgroup_id_x() #1 {
+ call void @use_workgroup_id_x()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_y() #5 {
+define void @func_indirect_use_workgroup_id_y() #1 {
+ call void @use_workgroup_id_y()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_z() #6 {
+define void @func_indirect_use_workgroup_id_z() #1 {
+ call void @use_workgroup_id_z()
+ ret void
+}
+
+; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #5 {
+define void @func_indirect_indirect_use_workgroup_id_y() #1 {
+ call void @func_indirect_use_workgroup_id_y()
+ ret void
+}
+
+; HSA: define void @indirect_x2_use_workgroup_id_y() #5 {
+define void @indirect_x2_use_workgroup_id_y() #1 {
+ call void @func_indirect_indirect_use_workgroup_id_y()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_dispatch_ptr() #7 {
+define void @func_indirect_use_dispatch_ptr() #1 {
+ call void @use_dispatch_ptr()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_queue_ptr() #8 {
+define void @func_indirect_use_queue_ptr() #1 {
+ call void @use_queue_ptr()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_dispatch_id() #9 {
+define void @func_indirect_use_dispatch_id() #1 {
+ call void @use_dispatch_id()
+ ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #11 {
+define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #1 {
+ call void @func_indirect_use_workgroup_id_y_workgroup_id_z()
+ ret void
+}
+
+; HSA: define void @recursive_use_workitem_id_y() #2 {
+define void @recursive_use_workitem_id_y() #1 {
+ %val = call i32 @llvm.amdgcn.workitem.id.y()
+ store volatile i32 %val, i32 addrspace(1)* undef
+ call void @recursive_use_workitem_id_y()
+ ret void
+}
+
+; HSA: define void @call_recursive_use_workitem_id_y() #2 {
+define void @call_recursive_use_workitem_id_y() #1 {
+ call void @recursive_use_workitem_id_y()
+ ret void
+}
+
+; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #8 {
+define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
+ %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
+ store volatile i32 0, i32 addrspace(4)* %stof
+ ret void
+}
+
+; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #12 {
+define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #2 {
+ %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
+ store volatile i32 0, i32 addrspace(4)* %stof
+ ret void
+}
+
+; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #13 {
+define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #2 {
+ %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
+ store volatile i32 0, i32 addrspace(4)* %stof
+ call void @func_indirect_use_queue_ptr()
+ ret void
+}
+
+; HSA: define void @indirect_use_group_to_flat_addrspacecast() #8 {
+define void @indirect_use_group_to_flat_addrspacecast() #1 {
+ call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
+ ret void
+}
+
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #11 {
+define void @indirect_use_group_to_flat_addrspacecast_gfx9() #1 {
+ call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
+ ret void
+}
+
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #8 {
+define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #1 {
+ call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
+ ret void
+}
+
+; HSA: define void @use_kernarg_segment_ptr() #14 {
+define void @use_kernarg_segment_ptr() #1 {
+ %kernarg.segment.ptr = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
+ store volatile i8 addrspace(2)* %kernarg.segment.ptr, i8 addrspace(2)* addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @func_indirect_use_kernarg_segment_ptr() #14 {
+define void @func_indirect_use_kernarg_segment_ptr() #1 {
+ call void @use_kernarg_segment_ptr()
+ ret void
+}
+
+; HSA: define void @use_implicitarg_ptr() #14 {
+define void @use_implicitarg_ptr() #1 {
+ %implicitarg.ptr = call i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr()
+ store volatile i8 addrspace(2)* %implicitarg.ptr, i8 addrspace(2)* addrspace(1)* undef
+ ret void
+}
+
+; HSA: define void @func_indirect_use_implicitarg_ptr() #14 {
+define void @func_indirect_use_implicitarg_ptr() #1 {
+ call void @use_implicitarg_ptr()
+ ret void
+}
+
+; HSA: declare void @external.func() #15
+declare void @external.func() #3
+
+; HSA: define internal void @defined.func() #15 {
+define internal void @defined.func() #3 {
+ ret void
+}
+
+; HSA: define void @func_call_external() #15 {
+define void @func_call_external() #3 {
+ call void @external.func()
+ ret void
+}
+
+; HSA: define void @func_call_defined() #15 {
+define void @func_call_defined() #3 {
+ call void @defined.func()
+ ret void
+}
+
+; HSA: define void @func_call_asm() #15 {
+define void @func_call_asm() #3 {
+ call void asm sideeffect "", ""() #3
+ ret void
+}
+
+; HSA: define amdgpu_kernel void @kern_call_external() #16 {
+define amdgpu_kernel void @kern_call_external() #3 {
+ call void @external.func()
+ ret void
+}
+
+; HSA: define amdgpu_kernel void @func_kern_defined() #16 {
+define amdgpu_kernel void @func_kern_defined() #3 {
+ call void @defined.func()
+ ret void
+}
+
+attributes #0 = { nounwind readnone speculatable }
+attributes #1 = { nounwind "target-cpu"="fiji" }
+attributes #2 = { nounwind "target-cpu"="gfx900" }
+attributes #3 = { nounwind }
+
+; HSA: attributes #0 = { nounwind readnone speculatable }
+; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-x" "target-cpu"="fiji" }
+; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
+; HSA: attributes #3 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
+; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-x" "target-cpu"="fiji" }
+; HSA: attributes #5 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
+; HSA: attributes #6 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #7 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
+; HSA: attributes #8 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
+; HSA: attributes #9 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
+; HSA: attributes #10 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #11 = { nounwind "target-cpu"="fiji" }
+; HSA: attributes #12 = { nounwind "target-cpu"="gfx900" }
+; HSA: attributes #13 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
+; HSA: attributes #14 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }
+; HSA: attributes #15 = { nounwind }
+; HSA: attributes #16 = { nounwind "amdgpu-flat-scratch" }
diff --git a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index f7461b925ca15..3059a95a50987 100644
--- a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -10,6 +10,7 @@ declare i32 @llvm.amdgcn.workitem.id.z() #0
declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #0
declare i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #0
+declare i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr() #0
; HSA: define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
@@ -164,6 +165,15 @@ define amdgpu_kernel void @use_queue_ptr(i32 addrspace(1)* %ptr) #1 {
ret void
}
+; HSA: define amdgpu_kernel void @use_kernarg_segment_ptr(i32 addrspace(1)* %ptr) #12 {
+define amdgpu_kernel void @use_kernarg_segment_ptr(i32 addrspace(1)* %ptr) #1 {
+ %dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
+ %bc = bitcast i8 addrspace(2)* %dispatch.ptr to i32 addrspace(2)*
+ %val = load i32, i32 addrspace(2)* %bc
+ store i32 %val, i32 addrspace(1)* %ptr
+ ret void
+}
+
; HSA: define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #11 {
define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
%stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
@@ -236,3 +246,4 @@ attributes #1 = { nounwind }
; HSA: attributes #9 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }
; HSA: attributes #10 = { nounwind "amdgpu-dispatch-ptr" }
; HSA: attributes #11 = { nounwind "amdgpu-queue-ptr" }
+; HSA: attributes #12 = { nounwind "amdgpu-kernarg-segment-ptr" }
diff --git a/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll b/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll
index 63a6f6a8d32c7..a0694fb1e3c91 100644
--- a/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll
+++ b/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll
@@ -36,7 +36,7 @@ attributes #2 = {"amdgpu-flat-work-group-size"="128,128"}
; CHECK-LABEL: {{^}}min_1024_max_2048
; CHECK: SGPRBlocks: 1
; CHECK: VGPRBlocks: 7
-; CHECK: NumSGPRsForWavesPerEU: 13
+; CHECK: NumSGPRsForWavesPerEU: 12
; CHECK: NumVGPRsForWavesPerEU: 32
@var = addrspace(1) global float 0.0
define amdgpu_kernel void @min_1024_max_2048() #3 {
diff --git a/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll b/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll
index 3dda73bc336ed..a5e97205de213 100644
--- a/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll
+++ b/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll
@@ -118,7 +118,7 @@ attributes #8 = {"amdgpu-waves-per-eu"="5,10"}
; CHECK-LABEL: {{^}}exactly_10:
; CHECK: SGPRBlocks: 1
; CHECK: VGPRBlocks: 5
-; CHECK: NumSGPRsForWavesPerEU: 13
+; CHECK: NumSGPRsForWavesPerEU: 12
; CHECK: NumVGPRsForWavesPerEU: 24
define amdgpu_kernel void @exactly_10() #9 {
%val0 = load volatile float, float addrspace(1)* @var
@@ -188,3 +188,15 @@ define amdgpu_kernel void @exactly_10() #9 {
ret void
}
attributes #9 = {"amdgpu-waves-per-eu"="10,10"}
+
+; Exactly 256 workitems and exactly 2 waves.
+; CHECK-LABEL: {{^}}empty_workitems_exactly_256_waves_exactly_2:
+; CHECK: SGPRBlocks: 12
+; CHECK: VGPRBlocks: 21
+; CHECK: NumSGPRsForWavesPerEU: 102
+; CHECK: NumVGPRsForWavesPerEU: 85
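+; Assuming allocation granules of 8 SGPRs and 4 VGPRs, the block counts are the
+; granule counts minus one: ceil(102/8) - 1 = 12 and ceil(85/4) - 1 = 21.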
+define amdgpu_kernel void @empty_workitems_exactly_256_waves_exactly_2() #10 {
+entry:
+ ret void
+}
+attributes #10 = {"amdgpu-flat-work-group-size"="256,256" "amdgpu-waves-per-eu"="2,2"}
diff --git a/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll b/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
index 5383bbe71ae36..5ffa45595e701 100644
--- a/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
+++ b/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll
@@ -347,7 +347,9 @@ define amdgpu_kernel void @test_fold_canonicalize_qNaN_value_f32(float addrspace
}
; GCN-LABEL: test_fold_canonicalize_minnum_value_from_load_f32:
-; GCN: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
+; VI: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
+; GFX9: v_min_f32_e32 [[V:v[0-9]+]], 0, v{{[0-9]+}}
+; GFX9: flat_store_dword v[{{[0-9:]+}}], [[V]]
define amdgpu_kernel void @test_fold_canonicalize_minnum_value_from_load_f32(float addrspace(1)* %arg) {
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
@@ -388,9 +390,11 @@ define amdgpu_kernel void @test_fold_canonicalize_sNaN_value_f32(float addrspace
}
; GCN-LABEL: test_fold_canonicalize_denorm_value_f32:
-; GCN: v_min_f32_e32 [[V0:v[0-9]+]], 0x7fffff, v{{[0-9]+}}
-; GCN: v_mul_f32_e32 v{{[0-9]+}}, 1.0, [[V0]]
-; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GFX9: v_min_f32_e32 [[V:v[0-9]+]], 0x7fffff, v{{[0-9]+}}
+; VI: v_min_f32_e32 [[V0:v[0-9]+]], 0x7fffff, v{{[0-9]+}}
+; VI: v_mul_f32_e32 v{{[0-9]+}}, 1.0, [[V0]]
+; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GFX9-NOT: 1.0
define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(float addrspace(1)* %arg) {
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
@@ -402,9 +406,11 @@ define amdgpu_kernel void @test_fold_canonicalize_denorm_value_f32(float addrspa
}
; GCN-LABEL: test_fold_canonicalize_maxnum_value_from_load_f32:
-; GCN: v_max_f32_e32 [[V0:v[0-9]+]], 0, v{{[0-9]+}}
-; GCN: v_mul_f32_e32 v{{[0-9]+}}, 1.0, [[V0]]
+; GFX9: v_max_f32_e32 [[V:v[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_max_f32_e32 [[V0:v[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mul_f32_e32 v{{[0-9]+}}, 1.0, [[V0]]
; GCN: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GFX9-NOT: 1.0
define amdgpu_kernel void @test_fold_canonicalize_maxnum_value_from_load_f32(float addrspace(1)* %arg) {
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
@@ -465,6 +471,49 @@ entry:
ret float %canonicalized
}
+; GCN-LABEL: {{^}}test_fold_canonicalize_load_nnan_value_f32
+; GFX9-DENORM: flat_load_dword [[V:v[0-9]+]],
+; GFX9-DENORM: flat_store_dword v[{{[0-9:]+}}], [[V]]
+; GFX9-DENORM-NOT: 1.0
+; GCN-FLUSH: v_mul_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
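+; With "no-nans-fp-math" (attribute #1 below) the loaded value cannot be an
+; sNaN, so on a denormal-preserving target the canonicalize folds away
+; entirely, while flush-denorm targets still need the multiply by 1.0.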
+define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f32(float addrspace(1)* %arg, float addrspace(1)* %out) #1 {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %id
+ %v = load float, float addrspace(1)* %gep, align 4
+ %canonicalized = tail call float @llvm.canonicalize.f32(float %v)
+ %gep2 = getelementptr inbounds float, float addrspace(1)* %out, i32 %id
+ store float %canonicalized, float addrspace(1)* %gep2, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_load_nnan_value_f64
+; GCN: flat_load_dwordx2 [[V:v\[[0-9:]+\]]],
+; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f64(double addrspace(1)* %arg, double addrspace(1)* %out) #1 {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds double, double addrspace(1)* %arg, i32 %id
+ %v = load double, double addrspace(1)* %gep, align 8
+ %canonicalized = tail call double @llvm.canonicalize.f64(double %v)
+ %gep2 = getelementptr inbounds double, double addrspace(1)* %out, i32 %id
+ store double %canonicalized, double addrspace(1)* %gep2, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_load_nnan_value_f16
+; GCN: flat_load_ushort [[V:v[0-9]+]],
+; GCN: flat_store_short v[{{[0-9:]+}}], [[V]]
+; GCN-NOT: 1.0
+define amdgpu_kernel void @test_fold_canonicalize_load_nnan_value_f16(half addrspace(1)* %arg, half addrspace(1)* %out) #1 {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds half, half addrspace(1)* %arg, i32 %id
+ %v = load half, half addrspace(1)* %gep, align 2
+ %canonicalized = tail call half @llvm.canonicalize.f16(half %v)
+ %gep2 = getelementptr inbounds half, half addrspace(1)* %out, i32 %id
+ store half %canonicalized, half addrspace(1)* %gep2, align 2
+ ret void
+}
+
declare float @llvm.canonicalize.f32(float) #0
declare double @llvm.canonicalize.f64(double) #0
declare half @llvm.canonicalize.f16(half) #0
@@ -485,3 +534,4 @@ declare float @llvm.maxnum.f32(float, float) #0
declare double @llvm.maxnum.f64(double, double) #0
attributes #0 = { nounwind readnone }
+attributes #1 = { "no-nans-fp-math"="true" }
diff --git a/test/CodeGen/AMDGPU/function-args.ll b/test/CodeGen/AMDGPU/function-args.ll
index 9b1368493ba5b..6b22cb0b7e286 100644
--- a/test/CodeGen/AMDGPU/function-args.ll
+++ b/test/CodeGen/AMDGPU/function-args.ll
@@ -34,6 +34,22 @@ define void @void_func_i1_signext(i1 signext %arg0) #0 {
ret void
}
+; GCN-LABEL: {{^}}i1_arg_i1_use:
+; GCN: v_and_b32_e32 v0, 1, v0
+; GCN: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc, -1
+define void @i1_arg_i1_use(i1 %arg) #0 {
+bb:
+ br i1 %arg, label %bb2, label %bb1
+
+bb1:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %bb2
+
+bb2:
+ ret void
+}
+
; GCN-LABEL: {{^}}void_func_i8:
; GCN-NOT: v0
; GCN: buffer_store_byte v0, off
diff --git a/test/CodeGen/AMDGPU/hsa.ll b/test/CodeGen/AMDGPU/hsa.ll
index 972fbd66ef378..0b19fbe7d70ce 100644
--- a/test/CodeGen/AMDGPU/hsa.ll
+++ b/test/CodeGen/AMDGPU/hsa.ll
@@ -40,7 +40,7 @@
; HSA-CI: .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
; HSA-VI: .hsa_code_object_isa 8,0,1,"AMD","AMDGPU"
-; HSA: .amdgpu_hsa_kernel simple
+; HSA-LABEL: .amdgpu_hsa_kernel simple
; HSA: {{^}}simple:
; HSA: .amd_kernel_code_t
; HSA: enable_sgpr_private_segment_buffer = 1
@@ -65,3 +65,11 @@ entry:
store i32 0, i32 addrspace(1)* %out
ret void
}
+
+; HSA-LABEL: .amdgpu_hsa_kernel simple_no_kernargs
+; HSA: enable_sgpr_kernarg_segment_ptr = 0
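+; No formal arguments and no kernarg intrinsic uses, so the kernarg segment
+; pointer does not need to be enabled for this kernel.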
+define amdgpu_kernel void @simple_no_kernargs() {
+entry:
+ store volatile i32 0, i32 addrspace(1)* undef
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
index 9a27809f37bb8..70e6b408ca29a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
@@ -49,6 +49,18 @@ define amdgpu_kernel void @test_implicit_alignment(i32 addrspace(1)* %out, <2 x
ret void
}
+; ALL-LABEL: {{^}}test_no_kernargs:
+; HSA: enable_sgpr_kernarg_segment_ptr = 1
+; HSA: s_load_dword s{{[0-9]+}}, s[4:5]
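+; Even with no formal arguments, calling the intrinsic forces the kernarg
+; segment pointer to be enabled; the load below comes straight off s[4:5].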
+define amdgpu_kernel void @test_no_kernargs() #1 {
+ %kernarg.segment.ptr = call noalias i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
+ %header.ptr = bitcast i8 addrspace(2)* %kernarg.segment.ptr to i32 addrspace(2)*
+ %gep = getelementptr i32, i32 addrspace(2)* %header.ptr, i64 10
+ %value = load i32, i32 addrspace(2)* %gep
+ store volatile i32 %value, i32 addrspace(1)* undef
+ ret void
+}
+
declare i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr() #0
declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #0
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll
index f0af876567b49..1c3cba8d3e4fe 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}test1:
; CHECK: v_cndmask_b32_e64 v0, 0, 1, exec
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.waitcnt.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.waitcnt.ll
index ee58d359a9359..a466671d8c552 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.waitcnt.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.waitcnt.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}test1:
; CHECK: image_store
diff --git a/test/CodeGen/AMDGPU/move-to-valu-worklist.ll b/test/CodeGen/AMDGPU/move-to-valu-worklist.ll
new file mode 100644
index 0000000000000..539eed92d540c
--- /dev/null
+++ b/test/CodeGen/AMDGPU/move-to-valu-worklist.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefix=GCN %s
+
+; In moveToVALU(), when a move to the vector ALU is performed, every
+; instruction in the use chain is visited. We do not want the same node
+; to be pushed onto the visit worklist more than once.
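+; The GCN-NEXT checks below require the converted v_xor/v_and pairs to be
+; emitted back to back, i.e. the chain was moved to the VALU in one pass.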
+
+; GCN-LABEL: {{^}}in_worklist_once:
+; GCN: buffer_load_dword
+; GCN: BB0_1:
+; GCN: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @in_worklist_once() #0 {
+bb:
+ %tmp = load i64, i64* undef
+ br label %bb1
+
+bb1: ; preds = %bb1, %bb
+ %tmp2 = phi i64 [ undef, %bb ], [ %tmp16, %bb1 ]
+ %tmp3 = phi i64 [ %tmp, %bb ], [ undef, %bb1 ]
+ %tmp11 = shl i64 %tmp2, 14
+ %tmp13 = xor i64 %tmp11, %tmp2
+ %tmp15 = and i64 %tmp3, %tmp13
+ %tmp16 = xor i64 %tmp15, %tmp3
+ br label %bb1
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/mubuf-offset-private.ll b/test/CodeGen/AMDGPU/mubuf-offset-private.ll
index 3a0605fa182a3..742c4f8af85d9 100644
--- a/test/CodeGen/AMDGPU/mubuf-offset-private.ll
+++ b/test/CodeGen/AMDGPU/mubuf-offset-private.ll
@@ -5,42 +5,42 @@
; Test addressing modes when the scratch base is not a frame index.
; GCN-LABEL: {{^}}store_private_offset_i8:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @store_private_offset_i8() #0 {
store volatile i8 5, i8* inttoptr (i32 8 to i8*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_i16:
-; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @store_private_offset_i16() #0 {
store volatile i16 5, i16* inttoptr (i32 8 to i16*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_i32:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @store_private_offset_i32() #0 {
store volatile i32 5, i32* inttoptr (i32 8 to i32*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_v2i32:
-; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @store_private_offset_v2i32() #0 {
store volatile <2 x i32> <i32 5, i32 10>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_v4i32:
-; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @store_private_offset_v4i32() #0 {
store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
ret void
}
; GCN-LABEL: {{^}}load_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @load_private_offset_i8() #0 {
%load = load volatile i8, i8* inttoptr (i32 8 to i8*)
ret void
@@ -65,7 +65,7 @@ define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0
}
; GCN-LABEL: {{^}}load_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @load_private_offset_i16() #0 {
%load = load volatile i16, i16* inttoptr (i32 8 to i16*)
ret void
@@ -90,28 +90,28 @@ define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #
}
; GCN-LABEL: {{^}}load_private_offset_i32:
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @load_private_offset_i32() #0 {
%load = load volatile i32, i32* inttoptr (i32 8 to i32*)
ret void
}
; GCN-LABEL: {{^}}load_private_offset_v2i32:
-; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @load_private_offset_v2i32() #0 {
%load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
ret void
}
; GCN-LABEL: {{^}}load_private_offset_v4i32:
-; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
define amdgpu_kernel void @load_private_offset_v4i32() #0 {
%load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s8 offset:4095
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:4095
define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
ret void
@@ -119,7 +119,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s8 offen{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen{{$}}
define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
ret void
@@ -127,7 +127,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s8 offen offset:1{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen offset:1{{$}}
define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
ret void
diff --git a/test/CodeGen/AMDGPU/parallelandifcollapse.ll b/test/CodeGen/AMDGPU/parallelandifcollapse.ll
index 190d2b72ebafe..87f37144244e1 100644
--- a/test/CodeGen/AMDGPU/parallelandifcollapse.ll
+++ b/test/CodeGen/AMDGPU/parallelandifcollapse.ll
@@ -8,7 +8,7 @@
; CHECK-NEXT: OR_INT
; FIXME: For some reason having the allocas here allowed the flatten cfg pass
-; to do its transfomation, however now that we are using local memory for
+; to do its transformation, however now that we are using local memory for
; allocas, the transformation isn't happening.
define amdgpu_kernel void @_Z9chk1D_512v() #0 {
diff --git a/test/CodeGen/AMDGPU/parallelorifcollapse.ll b/test/CodeGen/AMDGPU/parallelorifcollapse.ll
index 91116b0f65ea4..e199d5b5df254 100644
--- a/test/CodeGen/AMDGPU/parallelorifcollapse.ll
+++ b/test/CodeGen/AMDGPU/parallelorifcollapse.ll
@@ -5,7 +5,7 @@
; then merge if-regions with the same bodies.
; FIXME: For some reason having the allocas here allowed the flatten cfg pass
-; to do its transfomation, however now that we are using local memory for
+; to do its transformation, however now that we are using local memory for
; allocas, the transformation isn't happening.
; XFAIL: *
;
diff --git a/test/CodeGen/AMDGPU/private-access-no-objects.ll b/test/CodeGen/AMDGPU/private-access-no-objects.ll
index dcb089010e99d..cf0c7944d4cdc 100644
--- a/test/CodeGen/AMDGPU/private-access-no-objects.ll
+++ b/test/CodeGen/AMDGPU/private-access-no-objects.ll
@@ -10,14 +10,14 @@
; GCN-LABEL: {{^}}store_to_undef:
; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
; -O0 should assume spilling, so the input scratch resource descriptor
; should be used directly without any copies.
; OPTNONE-NOT: s_mov_b32
-; OPTNONE: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s7 offen{{$}}
+; OPTNONE: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
define amdgpu_kernel void @store_to_undef() #0 {
store volatile i32 0, i32* undef
ret void
@@ -26,7 +26,7 @@ define amdgpu_kernel void @store_to_undef() #0 {
; GCN-LABEL: {{^}}store_to_inttoptr:
; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
; OPT: buffer_store_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
define amdgpu_kernel void @store_to_inttoptr() #0 {
store volatile i32 0, i32* inttoptr (i32 124 to i32*)
@@ -36,7 +36,7 @@ define amdgpu_kernel void @store_to_inttoptr() #0 {
; GCN-LABEL: {{^}}load_from_undef:
; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
define amdgpu_kernel void @load_from_undef() #0 {
%ld = load volatile i32, i32* undef
@@ -46,7 +46,7 @@ define amdgpu_kernel void @load_from_undef() #0 {
; GCN-LABEL: {{^}}load_from_inttoptr:
; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
; OPT: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
define amdgpu_kernel void @load_from_inttoptr() #0 {
%ld = load volatile i32, i32* inttoptr (i32 124 to i32*)
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir b/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
index 770bfaddb23e7..a52b80ba86e59 100644
--- a/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
+++ b/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
@@ -34,7 +34,7 @@ body: |
bb.0:
successors: %bb.2, %bb.1
- %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, 0, implicit %exec
+ %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit %exec
%vcc = COPY killed %7
S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
diff --git a/test/CodeGen/AMDGPU/scratch-simple.ll b/test/CodeGen/AMDGPU/scratch-simple.ll
index 6ed730ad60f42..5e0178072e5e2 100644
--- a/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=gfx804 -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s
; This used to fail due to a v_add_i32 instruction with an illegal immediate
@@ -8,15 +8,16 @@
;
; GCN-LABEL: {{^}}ps_main:
-; GCN-DAG: s_mov_b32 [[SWO:s[0-9]+]], s0
+; GCN-DAG: s_mov_b32 s4, SCRATCH_RSRC_DWORD0
+; GCN-NOT: s_mov_b32 s0
; GCN-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0
; GCN-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]]
; GCN-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], 0x200, [[CLAMP_IDX]]
; GCN-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], 0x400, [[CLAMP_IDX]]
-; GCN: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+; GCN: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, s0 offen
define amdgpu_ps float @ps_main(i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
%v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
@@ -25,9 +26,10 @@ define amdgpu_ps float @ps_main(i32 %idx) {
}
; GCN-LABEL: {{^}}vs_main:
-; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN-DAG: s_mov_b32 s4, SCRATCH_RSRC_DWORD0
+; GCN-NOT: s_mov_b32 s0
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
define amdgpu_vs float @vs_main(i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
%v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
@@ -36,9 +38,9 @@ define amdgpu_vs float @vs_main(i32 %idx) {
}
; GCN-LABEL: {{^}}cs_main:
-; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN-DAG: s_mov_b32 s4, SCRATCH_RSRC_DWORD0
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
define amdgpu_cs float @cs_main(i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
%v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
@@ -47,10 +49,15 @@ define amdgpu_cs float @cs_main(i32 %idx) {
}
; GCN-LABEL: {{^}}hs_main:
-; SI: s_mov_b32 [[SWO:s[0-9]+]], s0
-; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; SI: s_mov_b32 s4, SCRATCH_RSRC_DWORD0
+; SI-NOT: s_mov_b32 s0
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+
+; GFX9: s_mov_b32 s0, SCRATCH_RSRC_DWORD0
+; GFX9-NOT: s_mov_b32 s5
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
define amdgpu_hs float @hs_main(i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
%v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
@@ -59,10 +66,13 @@ define amdgpu_hs float @hs_main(i32 %idx) {
}
; GCN-LABEL: {{^}}gs_main:
-; SI: s_mov_b32 [[SWO:s[0-9]+]], s0
-; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; SI: s_mov_b32 s4, SCRATCH_RSRC_DWORD0
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s0 offen
+
+; GFX9: s_mov_b32 s0, SCRATCH_RSRC_DWORD0
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
define amdgpu_gs float @gs_main(i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
%v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
@@ -71,10 +81,16 @@ define amdgpu_gs float @gs_main(i32 %idx) {
}
; GCN-LABEL: {{^}}hs_ir_uses_scratch_offset:
-; SI: s_mov_b32 [[SWO:s[0-9]+]], s6
-; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+
+; SI-NOT: s_mov_b32 s6
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s6 offen
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s6 offen
+
+; GFX9-NOT: s_mov_b32 s5
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
+
; GCN: s_mov_b32 s2, s5
define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
@@ -86,10 +102,14 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
}
; GCN-LABEL: {{^}}gs_ir_uses_scratch_offset:
-; SI: s_mov_b32 [[SWO:s[0-9]+]], s6
-; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
-; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s6 offen
+; SI: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s6 offen
+
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
+; GFX9: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, s5 offen
+
; GCN: s_mov_b32 s2, s5
define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) {
%v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
diff --git a/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
index 4f5c582f8b583..ff1b2ad73ef0b 100644
--- a/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
+++ b/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -332,7 +332,7 @@ body: |
# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}} = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 0, implicit-def %exec, implicit %exec
+# VI: %{{[0-9]+}} = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def %exec, implicit %exec
# VI: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def %vcc, implicit %exec
# VI: %{{[0-9]+}} = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def %exec, implicit %exec
@@ -345,20 +345,21 @@ body: |
# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 2, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 2, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, 2, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 0, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 2, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 2, implicit %exec
+# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, 2, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def %exec, implicit %exec
+
name: vopc_instructions
@@ -415,28 +416,28 @@ body: |
V_CMPX_EQ_I32_e32 123, killed %13, implicit-def %vcc, implicit-def %exec, implicit %exec
%14 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, 0, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit %exec
%15 = V_AND_B32_e64 %5, %3, implicit %exec
- %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, 0, implicit-def %exec, implicit %exec
+ %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def %exec, implicit %exec
%16 = V_AND_B32_e64 %5, %3, implicit %exec
%vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit %exec
%17 = V_AND_B32_e64 %5, %3, implicit %exec
%19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def %exec, implicit %exec
%20 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, 0, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit %exec
%21 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, 2, implicit-def %exec, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def %exec, implicit %exec
%23 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, 2, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit %exec
%24 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, 0, implicit-def %exec, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def %exec, implicit %exec
%25 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, 0, implicit-def %exec, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def %exec, implicit %exec
%26 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, 0, implicit-def %exec, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def %exec, implicit %exec
%27 = V_AND_B32_e64 %5, %3, implicit %exec
- %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, 2, implicit-def %exec, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def %exec, implicit %exec
%100 = V_MOV_B32_e32 %vcc_lo, implicit %exec
diff --git a/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir b/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
index 913b543321190..bd222adf6a68c 100644
--- a/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
+++ b/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
@@ -8,7 +8,7 @@
# GCN: %{{[0-9]+}} = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
# GCN: %{{[0-9]+}} = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}} = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, 0, implicit-def %vcc, implicit %exec
+# GCN: %{{[0-9]+}} = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
# GCN: %{{[0-9]+}} = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
---
@@ -50,7 +50,7 @@ body: |
%15 = V_BFM_B32_e64 %13, killed %14, implicit-def %vcc, implicit %exec
%16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
- %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, 0, implicit-def %vcc, implicit %exec
+ %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def %vcc, implicit %exec
%18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
%19 = V_READLANE_B32 killed %18, 0, implicit-def %vcc, implicit %exec
diff --git a/test/CodeGen/AMDGPU/trap.ll b/test/CodeGen/AMDGPU/trap.ll
index 51771c9723e00..04ff4c87ea775 100644
--- a/test/CodeGen/AMDGPU/trap.ll
+++ b/test/CodeGen/AMDGPU/trap.ll
@@ -19,11 +19,11 @@ declare void @llvm.debugtrap() #0
; MESA-TRAP: .section .AMDGPU.config
; MESA-TRAP: .long 47180
-; MESA-TRAP-NEXT: .long 208
+; MESA-TRAP-NEXT: .long 204
; NOMESA-TRAP: .section .AMDGPU.config
; NOMESA-TRAP: .long 47180
-; NOMESA-TRAP-NEXT: .long 144
+; NOMESA-TRAP-NEXT: .long 140
; GCN-LABEL: {{^}}hsa_trap:
; HSA-TRAP: enable_trap_handler = 1
@@ -45,11 +45,11 @@ define amdgpu_kernel void @hsa_trap() {
; MESA-TRAP: .section .AMDGPU.config
; MESA-TRAP: .long 47180
-; MESA-TRAP-NEXT: .long 208
+; MESA-TRAP-NEXT: .long 204
; NOMESA-TRAP: .section .AMDGPU.config
; NOMESA-TRAP: .long 47180
-; NOMESA-TRAP-NEXT: .long 144
+; NOMESA-TRAP-NEXT: .long 140
; GCN-WARNING: warning: <unknown>:0:0: in function hsa_debugtrap void (): debugtrap handler not supported
; GCN-LABEL: {{^}}hsa_debugtrap:
diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index 6eb937e71b1b6..54991d3d953cd 100644
--- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -81,7 +81,7 @@ body: |
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
- %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, 0, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec
S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc
bb.2.if:
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
index 135f02ac205a2..feae5e9f37920 100644
--- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
+++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
@@ -19,8 +19,9 @@
; HSA: workitem_private_segment_byte_size = 1536
; GCN-NOT: flat_scr
+; MESA-NOT: s_mov_b32 s3
+; HSA-NOT: s_mov_b32 s7
-; GCNMESA-DAG: s_mov_b32 s16, s3
; GCNMESA-DAG: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GCNMESA-DAG: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCNMESA-DAG: s_mov_b32 s14, -1
@@ -29,17 +30,32 @@
; GFX9MESA-DAG: s_mov_b32 s15, 0xe00000
-; GCN: buffer_store_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}} ; 4-byte Folded Spill
+; GCNMESA: buffer_store_dword {{v[0-9]+}}, off, s[12:15], s3 offset:{{[0-9]+}} ; 4-byte Folded Spill
-; GCN: buffer_store_dword {{v[0-9]}}, off, s[12:15], s16 offset:{{[0-9]+}}
-; GCN: buffer_store_dword {{v[0-9]}}, off, s[12:15], s16 offset:{{[0-9]+}}
-; GCN: buffer_store_dword {{v[0-9]}}, off, s[12:15], s16 offset:{{[0-9]+}}
-; GCN: buffer_store_dword {{v[0-9]}}, off, s[12:15], s16 offset:{{[0-9]+}}
+; GCNMESA: buffer_store_dword {{v[0-9]}}, off, s[12:15], s3 offset:{{[0-9]+}}
+; GCNMESA: buffer_store_dword {{v[0-9]}}, off, s[12:15], s3 offset:{{[0-9]+}}
+; GCNMESA: buffer_store_dword {{v[0-9]}}, off, s[12:15], s3 offset:{{[0-9]+}}
+; GCNMESA: buffer_store_dword {{v[0-9]}}, off, s[12:15], s3 offset:{{[0-9]+}}
+
+; GCNMESA: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s3 offset:{{[0-9]+}}
+; GCNMESA: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s3 offset:{{[0-9]+}}
+; GCNMESA: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s3 offset:{{[0-9]+}}
+; GCNMESA: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s3 offset:{{[0-9]+}}
+
+; HSA: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s7 offset:{{[0-9]+}} ; 4-byte Folded Spill
+
+; HSA: buffer_store_dword {{v[0-9]}}, off, s[0:3], s7 offset:{{[0-9]+}}
+; HSA: buffer_store_dword {{v[0-9]}}, off, s[0:3], s7 offset:{{[0-9]+}}
+; HSA: buffer_store_dword {{v[0-9]}}, off, s[0:3], s7 offset:{{[0-9]+}}
+; HSA: buffer_store_dword {{v[0-9]}}, off, s[0:3], s7 offset:{{[0-9]+}}
+
+; HSA: buffer_load_dword {{v[0-9]+}}, off, s[0:3], s7 offset:{{[0-9]+}}
+; HSA: buffer_load_dword {{v[0-9]+}}, off, s[0:3], s7 offset:{{[0-9]+}}
+; HSA: buffer_load_dword {{v[0-9]+}}, off, s[0:3], s7 offset:{{[0-9]+}}
+; HSA: buffer_load_dword {{v[0-9]+}}, off, s[0:3], s7 offset:{{[0-9]+}}
-; GCN: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}}
-; GCN: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}}
-; GCN: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}}
-; GCN: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}}
; GCN: NumVgprs: 256
; GCN: ScratchSize: 1536
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index ca2366a361fbf..afbd06a00faed 100644
--- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -13,7 +13,7 @@
; GCN-LABEL: {{^}}main:
-; GCN-DAG: s_mov_b32 s[[OFFREG:[0-9]+]], s12
+; GCN-NOT: s_mov_b32 s12
; GCN-DAG: s_mov_b32 s[[DESC0:[0-9]+]], SCRATCH_RSRC_DWORD0
; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD1
; GCN-DAG: s_mov_b32 s{{[0-9]+}}, -1
@@ -22,8 +22,8 @@
; GFX9-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe00000
; OFFREG is offset system SGPR
-; GCN: buffer_store_dword {{v[0-9]+}}, off, s{{\[}}[[DESC0]]:[[DESC3]]], s[[OFFREG]] offset:{{[0-9]+}} ; 4-byte Folded Spill
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[DESC0]]:[[DESC3]]], s[[OFFREG]] offset:{{[0-9]+}} ; 4-byte Folded Reload
+; GCN: buffer_store_dword {{v[0-9]+}}, off, s{{\[}}[[DESC0]]:[[DESC3]]], s12 offset:{{[0-9]+}} ; 4-byte Folded Spill
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[DESC0]]:[[DESC3]]], s12 offset:{{[0-9]+}} ; 4-byte Folded Reload
; GCN: NumVgprs: 256
; GCN: ScratchSize: 1536
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 6a1da0dfe85f3..0e3ef479bc3c8 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -45,6 +45,8 @@
define void @test_select_s32() { ret void }
define void @test_select_ptr() { ret void }
+ define void @test_br() { ret void }
+
define void @test_soft_fp_double() #0 { ret void }
attributes #0 = { "target-features"="+vfp2,-neonfp" }
@@ -1173,6 +1175,43 @@ body: |
; CHECK: BX_RET 14, _, implicit %r0
...
---
+name: test_br
+# CHECK-LABEL: name: test_br
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+body: |
+ bb.0:
+ ; CHECK: bb.0
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: %r0
+
+ %0(s1) = COPY %r0
+ ; CHECK: [[COND:%[0-9]+]] = COPY %r0
+
+ G_BRCOND %0(s1), %bb.1
+ ; CHECK: TSTri [[COND]], 1, 14, _, implicit-def %cpsr
+ ; CHECK: Bcc %bb.1, 0, %cpsr
+ G_BR %bb.2
+ ; CHECK: B %bb.2
+
+ bb.1:
+ ; CHECK: bb.1
+ successors: %bb.2(0x80000000)
+
+ G_BR %bb.2
+ ; CHECK: B %bb.2
+
+ bb.2:
+ ; CHECK: bb.2
+
+ BX_RET 14, _
+ ; CHECK: BX_RET 14, _
+...
+---
name: test_soft_fp_double
# CHECK-LABEL: name: test_soft_fp_double
legalized: true
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll b/test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll
index c778caacd0f45..c2e8c5abca4e1 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll
@@ -87,3 +87,55 @@ define arm_aapcscc i32 @test_urem_i32(i32 %x, i32 %y) {
%r = urem i32 %x, %y
ret i32 %r
}
+
+define arm_aapcscc i16 @test_srem_i16(i16 %x, i16 %y) {
+; CHECK-LABEL: test_srem_i16:
+; CHECK-DAG: sxth r0, r0
+; CHECK-DAG: sxth r1, r1
+; HWDIV: sdiv [[Q:r[0-9]+]], r0, r1
+; HWDIV: mul [[P:r[0-9]+]], [[Q]], r1
+; HWDIV: sub r0, r0, [[P]]
+; SOFT-AEABI: blx __aeabi_idivmod
+; SOFT-DEFAULT: blx __modsi3
+ %r = srem i16 %x, %y
+ ret i16 %r
+}
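+; (For reference: the HWDIV sequence above computes the remainder through the
+; identity r = x - (x / y) * y, since ARM's sdiv/udiv produce only a
+; quotient; the same pattern repeats in the remaining tests below.)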
+
+define arm_aapcscc i16 @test_urem_i16(i16 %x, i16 %y) {
+; CHECK-LABEL: test_urem_i16:
+; CHECK-DAG: uxth r0, r0
+; CHECK-DAG: uxth r1, r1
+; HWDIV: udiv [[Q:r[0-9]+]], r0, r1
+; HWDIV: mul [[P:r[0-9]+]], [[Q]], r1
+; HWDIV: sub r0, r0, [[P]]
+; SOFT-AEABI: blx __aeabi_uidivmod
+; SOFT-DEFAULT: blx __umodsi3
+ %r = urem i16 %x, %y
+ ret i16 %r
+}
+
+define arm_aapcscc i8 @test_srem_i8(i8 %x, i8 %y) {
+; CHECK-LABEL: test_srem_i8:
+; CHECK-DAG: sxtb r0, r0
+; CHECK-DAG: sxtb r1, r1
+; HWDIV: sdiv [[Q:r[0-9]+]], r0, r1
+; HWDIV: mul [[P:r[0-9]+]], [[Q]], r1
+; HWDIV: sub r0, r0, [[P]]
+; SOFT-AEABI: blx __aeabi_idivmod
+; SOFT-DEFAULT: blx __modsi3
+ %r = srem i8 %x, %y
+ ret i8 %r
+}
+
+define arm_aapcscc i8 @test_urem_i8(i8 %x, i8 %y) {
+; CHECK-LABEL: test_urem_i8:
+; CHECK-DAG: uxtb r0, r0
+; CHECK-DAG: uxtb r1, r1
+; HWDIV: udiv [[Q:r[0-9]+]], r0, r1
+; HWDIV: mul [[P:r[0-9]+]], [[Q]], r1
+; HWDIV: sub r0, r0, [[P]]
+; SOFT-AEABI: blx __aeabi_uidivmod
+; SOFT-DEFAULT: blx __umodsi3
+ %r = urem i8 %x, %y
+ ret i8 %r
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel.ll b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
index 4c498ff6ca9bf..419bcf71c1065 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-isel.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
@@ -420,3 +420,42 @@ entry:
%r = select i1 %cond, i32* %a, i32* %b
ret i32* %r
}
+
+define arm_aapcscc void @test_br() {
+; CHECK-LABEL: test_br
+; CHECK: [[LABEL:.L[[:alnum:]_]+]]:
+; CHECK: b [[LABEL]]
+entry:
+ br label %infinite
+
+infinite:
+ br label %infinite
+}
+
+declare arm_aapcscc void @brcond1()
+declare arm_aapcscc void @brcond2()
+
+define arm_aapcscc void @test_brcond(i32 %n) {
+; CHECK-LABEL: test_brcond
+; CHECK: cmp r0
+; CHECK-NEXT: movgt [[RCMP:r[0-9]+]], #1
+; CHECK: tst [[RCMP]], #1
+; CHECK-NEXT: bne [[FALSE:.L[[:alnum:]_]+]]
+; CHECK: blx brcond1
+; CHECK: [[FALSE]]:
+; CHECK: blx brcond2
+entry:
+ %cmp = icmp sgt i32 %n, 0
+ br i1 %cmp, label %if.true, label %if.false
+
+if.true:
+ call arm_aapcscc void @brcond1()
+ br label %if.end
+
+if.false:
+ call arm_aapcscc void @brcond2()
+ br label %if.end
+
+if.end:
+ ret void
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
index 9a0877846fc3e..f436c3774c869 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
@@ -14,6 +14,12 @@
define void @test_srem_i32() { ret void }
define void @test_urem_i32() { ret void }
+
+ define void @test_srem_i16() { ret void }
+ define void @test_urem_i16() { ret void }
+
+ define void @test_srem_i8() { ret void }
+ define void @test_urem_i8() { ret void }
...
---
name: test_sdiv_i32
@@ -323,3 +329,171 @@ body: |
%r0 = COPY %2(s32)
BX_RET 14, _, implicit %r0
...
+---
+name: test_srem_i16
+# CHECK-LABEL: name: test_srem_i16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s16) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s16) = COPY %r1
+ ; CHECK-DAG: [[X32:%[0-9]+]](s32) = G_SEXT [[X]](s16)
+ ; CHECK-DAG: [[Y32:%[0-9]+]](s32) = G_SEXT [[Y]](s16)
+ %0(s16) = COPY %r0
+ %1(s16) = COPY %r1
+ ; HWDIV: [[Q32:%[0-9]+]](s32) = G_SDIV [[X32]], [[Y32]]
+ ; HWDIV: [[P32:%[0-9]+]](s32) = G_MUL [[Q32]], [[Y32]]
+ ; HWDIV: [[R32:%[0-9]+]](s32) = G_SUB [[X32]], [[P32]]
+ ; SOFT-NOT: G_SREM
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X32]]
+ ; SOFT-DAG: %r1 = COPY [[Y32]]
+ ; SOFT-AEABI: BLX $__aeabi_idivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-AEABI: [[R32:%[0-9]+]](s32) = COPY %r1
+ ; SOFT-DEFAULT: BLX $__modsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_SREM
+ ; CHECK: [[R:%[0-9]+]](s16) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_SREM
+ %2(s16) = G_SREM %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s16)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_urem_i16
+# CHECK-LABEL: name: test_urem_i16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s16) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s16) = COPY %r1
+ ; CHECK-DAG: [[X32:%[0-9]+]](s32) = G_ZEXT [[X]](s16)
+ ; CHECK-DAG: [[Y32:%[0-9]+]](s32) = G_ZEXT [[Y]](s16)
+ %0(s16) = COPY %r0
+ %1(s16) = COPY %r1
+ ; HWDIV: [[Q32:%[0-9]+]](s32) = G_UDIV [[X32]], [[Y32]]
+ ; HWDIV: [[P32:%[0-9]+]](s32) = G_MUL [[Q32]], [[Y32]]
+ ; HWDIV: [[R32:%[0-9]+]](s32) = G_SUB [[X32]], [[P32]]
+ ; SOFT-NOT: G_UREM
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X32]]
+ ; SOFT-DAG: %r1 = COPY [[Y32]]
+ ; SOFT-AEABI: BLX $__aeabi_uidivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-AEABI: [[R32:%[0-9]+]](s32) = COPY %r1
+ ; SOFT-DEFAULT: BLX $__umodsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_UREM
+ ; CHECK: [[R:%[0-9]+]](s16) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_UREM
+ %2(s16) = G_UREM %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s16)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_srem_i8
+# CHECK-LABEL: name: test_srem_i8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s8) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s8) = COPY %r1
+ ; CHECK-DAG: [[X32:%[0-9]+]](s32) = G_SEXT [[X]](s8)
+ ; CHECK-DAG: [[Y32:%[0-9]+]](s32) = G_SEXT [[Y]](s8)
+ %0(s8) = COPY %r0
+ %1(s8) = COPY %r1
+ ; HWDIV: [[Q32:%[0-9]+]](s32) = G_SDIV [[X32]], [[Y32]]
+ ; HWDIV: [[P32:%[0-9]+]](s32) = G_MUL [[Q32]], [[Y32]]
+ ; HWDIV: [[R32:%[0-9]+]](s32) = G_SUB [[X32]], [[P32]]
+ ; SOFT-NOT: G_SREM
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X32]]
+ ; SOFT-DAG: %r1 = COPY [[Y32]]
+ ; SOFT-AEABI: BLX $__aeabi_idivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-AEABI: [[R32:%[0-9]+]](s32) = COPY %r1
+ ; SOFT-DEFAULT: BLX $__modsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_SREM
+ ; CHECK: [[R:%[0-9]+]](s8) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_SREM
+ %2(s8) = G_SREM %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s8)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_urem_i8
+# CHECK-LABEL: name: test_urem_i8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s8) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s8) = COPY %r1
+ ; CHECK-DAG: [[X32:%[0-9]+]](s32) = G_ZEXT [[X]](s8)
+ ; CHECK-DAG: [[Y32:%[0-9]+]](s32) = G_ZEXT [[Y]](s8)
+ %0(s8) = COPY %r0
+ %1(s8) = COPY %r1
+ ; HWDIV: [[Q32:%[0-9]+]](s32) = G_UDIV [[X32]], [[Y32]]
+ ; HWDIV: [[P32:%[0-9]+]](s32) = G_MUL [[Q32]], [[Y32]]
+ ; HWDIV: [[R32:%[0-9]+]](s32) = G_SUB [[X32]], [[P32]]
+ ; SOFT-NOT: G_UREM
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X32]]
+ ; SOFT-DAG: %r1 = COPY [[Y32]]
+ ; SOFT-AEABI: BLX $__aeabi_uidivmod, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-AEABI: [[R32:%[0-9]+]](s32) = COPY %r1
+ ; SOFT-DEFAULT: BLX $__umodsi3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: [[R32:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ ; SOFT-NOT: G_UREM
+ ; CHECK: [[R:%[0-9]+]](s8) = G_TRUNC [[R32]]
+ ; SOFT-NOT: G_UREM
+ %2(s8) = G_UREM %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s8)
+ BX_RET 14, _, implicit %r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 4575341dfc290..616f29d3b0689 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -42,6 +42,8 @@
define void @test_select_s32() { ret void }
define void @test_select_ptr() { ret void }
+ define void @test_brcond() { ret void }
+
define void @test_fadd_s32() #0 { ret void }
define void @test_fadd_s64() #0 { ret void }
@@ -863,6 +865,40 @@ body: |
BX_RET 14, _, implicit %r0
...
---
+name: test_brcond
+# CHECK-LABEL: name: test_brcond
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s1) = G_ICMP intpred(sgt), %0(s32), %1
+ G_BRCOND %2(s1), %bb.1
+ ; G_BRCOND with s1 is legal, so we should find it unchanged in the output
+ ; CHECK: G_BRCOND {{%[0-9]+}}(s1), %bb.1
+ G_BR %bb.2
+
+ bb.1:
+ %r0 = COPY %1(s32)
+ BX_RET 14, _, implicit %r0
+
+ bb.2:
+ %r0 = COPY %0(s32)
+ BX_RET 14, _, implicit %r0
+
+...
+---
name: test_fadd_s32
# CHECK-LABEL: name: test_fadd_s32
legalized: false
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index ffca431d96ea1..638c6e6209266 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -40,6 +40,8 @@
define void @test_select_s32() { ret void }
+ define void @test_br() { ret void }
+
define void @test_fadd_s32() #0 { ret void }
define void @test_fadd_s64() #0 { ret void }
@@ -830,6 +832,34 @@ body: |
...
---
+name: test_br
+# CHECK-LABEL: name: test_br
+legalized: true
+regBankSelected: false
+# CHECK: regBankSelected: true
+selected: false
+registers:
+ - { id: 0, class: _ }
+# CHECK: { id: 0, class: gprb, preferred-register: '' }
+# Check that we map the condition of the G_BRCOND into the GPR.
+# For the G_BR, there are no registers to map, but make sure we don't crash.
+body: |
+ bb.0:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: %r0
+
+ %0(s1) = COPY %r0
+ G_BRCOND %0(s1), %bb.1
+ G_BR %bb.2
+
+ bb.1:
+ BX_RET 14, _
+
+ bb.2:
+ BX_RET 14, _
+
+...
+---
name: test_fadd_s32
# CHECK-LABEL: name: test_fadd_s32
legalized: true
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index 23c4ccea46046..644a7fbf8d9ad 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -26,6 +26,7 @@ entry:
store i32 3855, i32* %xort
store i32 4, i32* %temp
%tmp = load i32, i32* %temp
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
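+ ; The empty inline asm above is a compiler barrier (memory clobber),
+ ; presumably inserted so each atomicrmw expansion stays a distinct
+ ; ldrex/strex loop that its CHECK group can match in order.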
; CHECK: ldrex
; CHECK: add
; CHECK: strex
@@ -35,6 +36,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%0 = atomicrmw add i32* %val1, i32 %tmp monotonic
store i32 %0, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
@@ -44,6 +46,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%1 = atomicrmw sub i32* %val2, i32 30 monotonic
store i32 %1, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: add
; CHECK: strex
@@ -53,6 +56,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%2 = atomicrmw add i32* %val2, i32 1 monotonic
store i32 %2, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
@@ -62,6 +66,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%3 = atomicrmw sub i32* %val2, i32 1 monotonic
store i32 %3, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: and
; CHECK: strex
@@ -71,6 +76,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%4 = atomicrmw and i32* %andt, i32 4080 monotonic
store i32 %4, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: or
; CHECK: strex
@@ -80,6 +86,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%5 = atomicrmw or i32* %ort, i32 4080 monotonic
store i32 %5, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: eor
; CHECK: strex
@@ -89,6 +96,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%6 = atomicrmw xor i32* %xort, i32 4080 monotonic
store i32 %6, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
@@ -98,6 +106,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%7 = atomicrmw min i32* %val2, i32 16 monotonic
store i32 %7, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
%neg = sub i32 0, 1
; CHECK: ldrex
; CHECK: cmp
@@ -108,6 +117,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%8 = atomicrmw min i32* %val2, i32 %neg monotonic
store i32 %8, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
@@ -117,6 +127,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%9 = atomicrmw max i32* %val2, i32 1 monotonic
store i32 %9, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
@@ -126,6 +137,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%10 = atomicrmw max i32* %val2, i32 0 monotonic
store i32 %10, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
@@ -135,6 +147,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%11 = atomicrmw umin i32* %val2, i32 16 monotonic
store i32 %11, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
%uneg = sub i32 0, 1
; CHECK: ldrex
; CHECK: cmp
@@ -145,6 +158,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
store i32 %12, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
@@ -154,6 +168,7 @@ entry:
; CHECK-BAREMETAL-NOT: __sync
%13 = atomicrmw umax i32* %val2, i32 1 monotonic
store i32 %13, i32* %old
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
diff --git a/test/CodeGen/AVR/branch-relaxation.ll b/test/CodeGen/AVR/branch-relaxation.ll
index d6f07f6535763..e415b059692e7 100644
--- a/test/CodeGen/AVR/branch-relaxation.ll
+++ b/test/CodeGen/AVR/branch-relaxation.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=avr | FileCheck %s
-; CHECKC-LABEL: relax_breq
+; CHECK-LABEL: relax_breq
; CHECK: cpi r{{[0-9]+}}, 0
; CHECK: brne LBB0_1
; CHECK: rjmp LBB0_2
@@ -66,7 +66,7 @@ finished:
ret i8 3
}
-; CHECKC-LABEL: no_relax_breq
+; CHECK-LABEL: no_relax_breq
; CHECK: cpi r{{[0-9]+}}, 0
; CHECK: breq [[END_BB:LBB[0-9]+_[0-9]+]]
; CHECK: nop
diff --git a/test/CodeGen/BPF/select_ri.ll b/test/CodeGen/BPF/select_ri.ll
new file mode 100644
index 0000000000000..c4ac376502b8a
--- /dev/null
+++ b/test/CodeGen/BPF/select_ri.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs | FileCheck %s
+;
+; Source file:
+; int b, c;
+; int test() {
+; int a = b;
+; if (a)
+; a = c;
+; return a;
+; }
+@b = common local_unnamed_addr global i32 0, align 4
+@c = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @test() local_unnamed_addr #0 {
+entry:
+ %0 = load i32, i32* @b, align 4
+ %tobool = icmp eq i32 %0, 0
+ %1 = load i32, i32* @c, align 4
+ %. = select i1 %tobool, i32 0, i32 %1
+; CHECK: r1 = <MCOperand Expr:(b)>ll
+; CHECK: r1 = *(u32 *)(r1 + 0)
+; CHECK: if r1 == 0 goto
+ ret i32 %.
+}
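+; (Illustrative note: "select_ri" refers to a select whose compare operand is
+; an immediate; since %tobool tests against zero, the backend can emit the
+; register-immediate branch "if r1 == 0" checked above instead of
+; materializing the constant in a second register.)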
+
+attributes #0 = { norecurse nounwind readonly }
diff --git a/test/CodeGen/BPF/setcc.ll b/test/CodeGen/BPF/setcc.ll
index 294c493656702..7e20814da8073 100644
--- a/test/CodeGen/BPF/setcc.ll
+++ b/test/CodeGen/BPF/setcc.ll
@@ -7,7 +7,7 @@ define i16 @sccweqand(i16 %a, i16 %b) nounwind {
ret i16 %t3
}
; CHECK-LABEL: sccweqand:
-; CHECK: if r1 == r2
+; CHECK: if r1 == 0
define i16 @sccwneand(i16 %a, i16 %b) nounwind {
%t1 = and i16 %a, %b
@@ -16,7 +16,7 @@ define i16 @sccwneand(i16 %a, i16 %b) nounwind {
ret i16 %t3
}
; CHECK-LABEL: sccwneand:
-; CHECK: if r1 != r2
+; CHECK: if r1 != 0
define i16 @sccwne(i16 %a, i16 %b) nounwind {
%t1 = icmp ne i16 %a, %b
diff --git a/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll b/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
index 9e4664ad69c9b..48c5f8f4d247a 100644
--- a/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
+++ b/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
@@ -1,7 +1,6 @@
; RUN: llc < %s
; Bug: PR31341
-; XFAIL: avr
;; Date: Jul 29, 2003.
;; From: test/Programs/MultiSource/Ptrdist-bc
diff --git a/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll b/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
index a9a33d72bca25..afa2e8a72ed10 100644
--- a/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
+++ b/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
@@ -1,8 +1,5 @@
; RUN: llc < %s
-; Bug: PR31898
-; XFAIL: avr
-
; This caused ScheduleDAG to crash in EmitPhysRegCopy when searching
; the uses of a copy to a physical register without ignoring non-data
; dependence, PR10220.
diff --git a/test/CodeGen/Generic/print-mul-exp.ll b/test/CodeGen/Generic/print-mul-exp.ll
index 91c8147aaad91..1426fb59f6695 100644
--- a/test/CodeGen/Generic/print-mul-exp.ll
+++ b/test/CodeGen/Generic/print-mul-exp.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s
+; XFAIL: avr
@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
@a_mul_str = internal constant [13 x i8] c"a * %d = %d\0A\00" ; <[13 x i8]*> [#uses=1]
diff --git a/test/CodeGen/Generic/print-mul.ll b/test/CodeGen/Generic/print-mul.ll
index 4b60d759278ac..20fb1be6edef1 100644
--- a/test/CodeGen/Generic/print-mul.ll
+++ b/test/CodeGen/Generic/print-mul.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s
+; XFAIL: avr
@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
@b_str = internal constant [8 x i8] c"b = %d\0A\00" ; <[8 x i8]*> [#uses=1]
diff --git a/test/CodeGen/Generic/print-shift.ll b/test/CodeGen/Generic/print-shift.ll
index 56b3ec1df760d..1fda55420b595 100644
--- a/test/CodeGen/Generic/print-shift.ll
+++ b/test/CodeGen/Generic/print-shift.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s
+; XFAIL: avr
@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
@b_str = internal constant [8 x i8] c"b = %d\0A\00" ; <[8 x i8]*> [#uses=1]
diff --git a/test/CodeGen/Generic/v-split.ll b/test/CodeGen/Generic/v-split.ll
index 91aece94fecd4..f9a1cee440ca8 100644
--- a/test/CodeGen/Generic/v-split.ll
+++ b/test/CodeGen/Generic/v-split.ll
@@ -1,8 +1,5 @@
; RUN: llc < %s
-; Bug: PR31898
-; XFAIL: avr
-
%f8 = type <8 x float>
define void @test_f8(%f8 *%P, %f8* %Q, %f8 *%S) {
diff --git a/test/CodeGen/Generic/vector-redux.ll b/test/CodeGen/Generic/vector-redux.ll
index 64562d6d94904..8efdbf85b8c04 100644
--- a/test/CodeGen/Generic/vector-redux.ll
+++ b/test/CodeGen/Generic/vector-redux.ll
@@ -1,9 +1,6 @@
; RUN: llc < %s -debug-only=isel -o /dev/null 2>&1 | FileCheck %s
; REQUIRES: asserts
-; Bug: PR31898
-; XFAIL: avr
-
@a = global [1024 x i32] zeroinitializer, align 16
define i32 @reduce_add() {
diff --git a/test/CodeGen/Generic/vector.ll b/test/CodeGen/Generic/vector.ll
index 9c0cacdcd8788..2d4dc501a53ab 100644
--- a/test/CodeGen/Generic/vector.ll
+++ b/test/CodeGen/Generic/vector.ll
@@ -1,9 +1,6 @@
; Test that vectors are scalarized/lowered correctly.
; RUN: llc < %s
-; Bug: PR31898
-; XFAIL: avr
-
%d8 = type <8 x double>
%f1 = type <1 x float>
%f2 = type <2 x float>
diff --git a/test/CodeGen/Hexagon/intrinsics/system_user.ll b/test/CodeGen/Hexagon/intrinsics/system_user.ll
index ac4c53e221d07..23473c92da911 100644
--- a/test/CodeGen/Hexagon/intrinsics/system_user.ll
+++ b/test/CodeGen/Hexagon/intrinsics/system_user.ll
@@ -1,13 +1,71 @@
-; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
-; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
-; Hexagon Programmer's Reference Manual 11.9.1 SYSTEM/USER
+; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK-CALL-NOT: call
+target triple = "hexagon"
-; Data cache prefetch
-declare void @llvm.hexagon.prefetch(i8*)
-define void @prefetch(i8* %a) {
- call void @llvm.hexagon.prefetch(i8* %a)
+; CHECK-LABEL: dc00:
+; CHECK: dcfetch
+define void @dc00(i8* nocapture readonly %p) local_unnamed_addr #0 {
+ tail call void @llvm.hexagon.prefetch(i8* %p)
ret void
}
-; CHECK: dcfetch({{.*}}+#0)
+
+; CHECK-LABEL: dc01:
+; CHECK: dccleana
+define void @dc01(i8* nocapture readonly %p) local_unnamed_addr #0 {
+entry:
+ tail call void @llvm.hexagon.Y2.dccleana(i8* %p)
+ ret void
+}
+
+; CHECK-LABEL: dc02:
+; CHECK: dccleaninva
+define void @dc02(i8* nocapture readonly %p) local_unnamed_addr #0 {
+entry:
+ tail call void @llvm.hexagon.Y2.dccleaninva(i8* %p)
+ ret void
+}
+
+; CHECK-LABEL: dc03:
+; CHECK: dcinva
+define void @dc03(i8* nocapture readonly %p) local_unnamed_addr #0 {
+entry:
+ tail call void @llvm.hexagon.Y2.dcinva(i8* %p)
+ ret void
+}
+
+; CHECK-LABEL: dc04:
+; CHECK: dczeroa
+define void @dc04(i8* nocapture %p) local_unnamed_addr #0 {
+entry:
+ tail call void @llvm.hexagon.Y2.dczeroa(i8* %p)
+ ret void
+}
+
+; CHECK-LABEL: dc05:
+; CHECK: l2fetch(r{{[0-9]+}},r{{[0-9]+}})
+define void @dc05(i8* nocapture readonly %p, i32 %q) local_unnamed_addr #0 {
+entry:
+ tail call void @llvm.hexagon.Y4.l2fetch(i8* %p, i32 %q)
+ ret void
+}
+
+; CHECK-LABEL: dc06:
+; CHECK: l2fetch(r{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
+define void @dc06(i8* nocapture readonly %p, i64 %q) local_unnamed_addr #0 {
+entry:
+ tail call void @llvm.hexagon.Y5.l2fetch(i8* %p, i64 %q)
+ ret void
+}
+
+declare void @llvm.hexagon.prefetch(i8* nocapture) #1
+declare void @llvm.hexagon.Y2.dccleana(i8* nocapture readonly) #2
+declare void @llvm.hexagon.Y2.dccleaninva(i8* nocapture readonly) #2
+declare void @llvm.hexagon.Y2.dcinva(i8* nocapture readonly) #2
+declare void @llvm.hexagon.Y2.dczeroa(i8* nocapture) #3
+declare void @llvm.hexagon.Y4.l2fetch(i8* nocapture readonly, i32) #2
+declare void @llvm.hexagon.Y5.l2fetch(i8* nocapture readonly, i64) #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { inaccessiblemem_or_argmemonly nounwind }
+attributes #2 = { nounwind }
+attributes #3 = { argmemonly nounwind writeonly }
diff --git a/test/CodeGen/Hexagon/switch-lut-explicit-section.ll b/test/CodeGen/Hexagon/switch-lut-explicit-section.ll
new file mode 100644
index 0000000000000..6c67a0dab1a8c
--- /dev/null
+++ b/test/CodeGen/Hexagon/switch-lut-explicit-section.ll
@@ -0,0 +1,32 @@
+;RUN: llc -O2 -hexagon-emit-lut-text=true < %s | FileCheck --check-prefix=FUNCTEXT %s
+;RUN: llc -O2 -hexagon-emit-lut-text=true -function-sections < %s | FileCheck --check-prefix=FUNCTEXT %s
+
+;This test checks that the lookup table is placed in the explicit section
+;specified on the function.
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+;FUNCTEXT: .text
+;FUNCTEXT: .section{{.*}}tcm.hexagon,
+;FUNCTEXT-NOT: .section{{.*}}.rodata
+;FUNCTEXT-NOT: .text
+;FUNCTEXT: .Lswitch.table:
+;FUNCTEXT-NEXT: .word
+
+@switch.table = private unnamed_addr constant [9 x i32] [i32 9, i32 20, i32 14, i32 22, i32 12, i32 5, i32 98, i32 8, i32 11] #0
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @foo(i32 %x) local_unnamed_addr #0 section "tcm.hexagon" {
+entry:
+ %0 = icmp ult i32 %x, 9
+ br i1 %0, label %switch.lookup, label %return
+
+switch.lookup: ; preds = %entry
+ %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
+ %switch.load = load i32, i32* %switch.gep, align 4
+ ret i32 %switch.load
+
+return: ; preds = %entry
+ ret i32 19
+}
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/switch-lut-function-section.ll b/test/CodeGen/Hexagon/switch-lut-function-section.ll
new file mode 100644
index 0000000000000..bb2b1e798c8ab
--- /dev/null
+++ b/test/CodeGen/Hexagon/switch-lut-function-section.ll
@@ -0,0 +1,30 @@
+;RUN: llc -O2 -hexagon-emit-lut-text=true -function-sections < %s | FileCheck --check-prefix=FUNCTEXT %s
+
+;This test checks the placement of the lookup table in the function's text section.
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+;FUNCTEXT: .text
+;FUNCTEXT: .section{{.*}}text.foo,
+;FUNCTEXT-NOT: .section{{.*}}.rodata
+;FUNCTEXT: .Lswitch.table:
+;FUNCTEXT-NEXT: .word
+
+@switch.table = private unnamed_addr constant [9 x i32] [i32 9, i32 20, i32 14, i32 22, i32 12, i32 5, i32 98, i32 8, i32 11] #0
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @foo(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = icmp ult i32 %x, 9
+ br i1 %0, label %switch.lookup, label %return
+
+switch.lookup: ; preds = %entry
+ %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
+ %switch.load = load i32, i32* %switch.gep, align 4
+ ret i32 %switch.load
+
+return: ; preds = %entry
+ ret i32 19
+}
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll b/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll
new file mode 100644
index 0000000000000..57fdfbf33abce
--- /dev/null
+++ b/test/CodeGen/Hexagon/switch-lut-multiple-functions.ll
@@ -0,0 +1,42 @@
+;RUN: llc -O2 -hexagon-emit-lut-text=true < %s | FileCheck --check-prefix=TEXT %s
+;If the lookup table is used by more than one function, we should ignore the
+;flag and place it in .rodata.
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+;TEXT: .text
+;TEXT: .section{{.*}}.rodata
+;TEXT: .Lswitch.table:
+;TEXT-NEXT: .word
+@switch.table = private unnamed_addr constant [9 x i32] [i32 9, i32 20, i32 14, i32 22, i32 12, i32 5, i32 98, i32 8, i32 11]
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @foo(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = icmp ult i32 %x, 9
+ br i1 %0, label %switch.lookup, label %return
+
+switch.lookup: ; preds = %entry
+ %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
+ %switch.load = load i32, i32* %switch.gep, align 4
+ ret i32 %switch.load
+
+return: ; preds = %entry
+ ret i32 19
+}
+
+define i32 @goo(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = icmp ult i32 %x, 9
+ br i1 %0, label %switch.lookup, label %return
+
+switch.lookup: ; preds = %entry
+ %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
+ %switch.load = load i32, i32* %switch.gep, align 4
+ ret i32 %switch.load
+
+return: ; preds = %entry
+ ret i32 19
+}
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/switch-lut-text-section.ll b/test/CodeGen/Hexagon/switch-lut-text-section.ll
new file mode 100644
index 0000000000000..b4d3e898d1034
--- /dev/null
+++ b/test/CodeGen/Hexagon/switch-lut-text-section.ll
@@ -0,0 +1,27 @@
+;RUN: llc -O2 -hexagon-emit-lut-text=true < %s | FileCheck --check-prefix=TEXT %s
+;This test checks the placement of the lookup table in the text section.
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+;TEXT: .text
+;TEXT-NOT: .section{{.*}}.rodata
+;TEXT: .Lswitch.table:
+;TEXT-NEXT: .word
+@switch.table = private unnamed_addr constant [9 x i32] [i32 9, i32 20, i32 14, i32 22, i32 12, i32 5, i32 98, i32 8, i32 11]
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @foo(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = icmp ult i32 %x, 9
+ br i1 %0, label %switch.lookup, label %return
+
+switch.lookup: ; preds = %entry
+ %switch.gep = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table, i32 0, i32 %x
+ %switch.load = load i32, i32* %switch.gep, align 4
+ ret i32 %switch.load
+
+return: ; preds = %entry
+ ret i32 19
+}
+
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/v6vec-vprint.ll b/test/CodeGen/Hexagon/v6vec-vprint.ll
index 224547c24b757..24daeac3fb5de 100644
--- a/test/CodeGen/Hexagon/v6vec-vprint.ll
+++ b/test/CodeGen/Hexagon/v6vec-vprint.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-hexagon-hvx -disable-hexagon-shuffle=0 -O2 -enable-hexagon-vector-print < %s | FileCheck --check-prefix=CHECK %s
+; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-hexagon-hvx -disable-hexagon-shuffle=0 -O2 -enable-hexagon-vector-print < %s | FileCheck %s
; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-hexagon-hvx -disable-hexagon-shuffle=0 -O2 -enable-hexagon-vector-print -trace-hex-vector-stores-only < %s | FileCheck --check-prefix=VSTPRINT %s
; generate .long XXXX which is a vector debug print instruction.
; CHECK: .long 0x1dffe0
diff --git a/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll b/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll
new file mode 100644
index 0000000000000..32abb75f20f4c
--- /dev/null
+++ b/test/CodeGen/Hexagon/vect/vect-load-v4i16.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+
+; CHECK-LABEL: danny:
+; CHECK-DAG: [[T0:r[0-9]+]] = memuh(r0+#0)
+; CHECK-DAG: [[T1:r[0-9]+]] = memuh(r0+#2)
+; CHECK: [[T0]] |= asl([[T1]],#16)
+; CHECK-DAG: [[T2:r[0-9]+]] = memuh(r0+#4)
+; CHECK-DAG: [[T3:r[0-9]+]] = memuh(r0+#6)
+; CHECK: [[T2]] |= asl([[T3]],#16)
+; CHECK: combine([[T2]],[[T0]])
+define <4 x i16> @danny(<4 x i16>* %p) {
+ %t0 = load <4 x i16>, <4 x i16>* %p, align 2
+ ret <4 x i16> %t0
+}
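+; (danny's load is only align 2, so the v4i16 is assembled from four
+; halfword loads merged pairwise with asl/or; sammy below is align 4 and can
+; use two plain word loads instead.)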
+
+; CHECK-LABEL: sammy:
+; CHECK-DAG: [[T0:r[0-9]+]] = memw(r0+#0)
+; CHECK-DAG: [[T1:r[0-9]+]] = memw(r0+#4)
+; CHECK: combine([[T1]],[[T0]])
+define <4 x i16> @sammy(<4 x i16>* %p) {
+ %t0 = load <4 x i16>, <4 x i16>* %p, align 4
+ ret <4 x i16> %t0
+}
diff --git a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll b/test/CodeGen/Hexagon/vect/vect-v4i16.ll
index f49a1e24a1bbe..f49a1e24a1bbe 100644
--- a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
+++ b/test/CodeGen/Hexagon/vect/vect-v4i16.ll
diff --git a/test/CodeGen/MIR/AArch64/target-memoperands.mir b/test/CodeGen/MIR/AArch64/target-memoperands.mir
index f853b551e0986..c71302d97e2e4 100644
--- a/test/CodeGen/MIR/AArch64/target-memoperands.mir
+++ b/test/CodeGen/MIR/AArch64/target-memoperands.mir
@@ -10,13 +10,17 @@
---
# CHECK-LABEL: name: target_memoperands
# CHECK: %1(s64) = G_LOAD %0(p0) :: ("aarch64-suppress-pair" load 8)
+# CHECK: %2(s32) = G_LOAD %0(p0) :: ("aarch64-strided-access" load 4)
# CHECK: G_STORE %1(s64), %0(p0) :: ("aarch64-suppress-pair" store 8)
+# CHECK: G_STORE %2(s32), %0(p0) :: ("aarch64-strided-access" store 4)
name: target_memoperands
body: |
bb.0:
%0:_(p0) = COPY %x0
%1:_(s64) = G_LOAD %0(p0) :: ("aarch64-suppress-pair" load 8)
+ %2:_(s32) = G_LOAD %0(p0) :: ("aarch64-strided-access" load 4)
G_STORE %1(s64), %0(p0) :: ("aarch64-suppress-pair" store 8)
+ G_STORE %2(s32), %0(p0) :: ("aarch64-strided-access" store 4)
RET_ReallyLR
...
diff --git a/test/CodeGen/MIR/AMDGPU/fold-multiple.mir b/test/CodeGen/MIR/AMDGPU/fold-multiple.mir
new file mode 100644
index 0000000000000..a5da33a997d39
--- /dev/null
+++ b/test/CodeGen/MIR/AMDGPU/fold-multiple.mir
@@ -0,0 +1,40 @@
+# RUN: llc --mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-fold-operands,si-shrink-instructions %s -o - | FileCheck %s
+--- |
+ define amdgpu_kernel void @test() #0 {
+ ret void
+ }
+
+ attributes #0 = { nounwind }
+
+...
+---
+
+# This used to crash / trigger an assertion, because re-scanning the use list
+# after constant-folding the definition of %3 led to the definition of %2
+# being processed twice.
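+#
+# Concretely: %1 is the constant 2, so folding turns the S_LSHL_B32 into
+# 2 << 2 = 8, and the V_AND_B32 below is expected to use the immediate 8.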
+
+# CHECK-LABEL: name: test
+# CHECK: %2 = V_LSHLREV_B32_e32 2, killed %0, implicit %exec
+# CHECK: %4 = V_AND_B32_e32 8, killed %2, implicit %exec
+
+name: test
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: sreg_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_32 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: sreg_128 }
+body: |
+ bb.0 (%ir-block.0):
+ %0 = IMPLICIT_DEF
+ %1 = S_MOV_B32 2
+ %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit %exec
+ %3 = S_LSHL_B32 %1, killed %1, implicit-def dead %scc
+ %4 = V_AND_B32_e64 killed %2, killed %3, implicit %exec
+ %5 = IMPLICIT_DEF
+ BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/MSP430/vararg.ll b/test/CodeGen/MSP430/vararg.ll
index 4baf499848fd8..3501861f5757d 100644
--- a/test/CodeGen/MSP430/vararg.ll
+++ b/test/CodeGen/MSP430/vararg.ll
@@ -39,11 +39,11 @@ entry:
; CHECK-LABEL: va_copy:
%vl.addr = alloca i8*, align 2
%vl2 = alloca i8*, align 2
-; CHECK: mov.w r12, 2(r1)
+; CHECK-DAG: mov.w r12, 2(r1)
store i8* %vl, i8** %vl.addr, align 2
%0 = bitcast i8** %vl2 to i8*
%1 = bitcast i8** %vl.addr to i8*
-; CHECK-NEXT: mov.w r12, 0(r1)
+; CHECK-DAG: mov.w r12, 0(r1)
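+; (The two spills through r12 may be scheduled in either order, hence
+; the switch to CHECK-DAG.)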
call void @llvm.va_copy(i8* %0, i8* %1)
ret void
}
diff --git a/test/CodeGen/Mips/2008-06-05-Carry.ll b/test/CodeGen/Mips/2008-06-05-Carry.ll
index c61e1cdedea78..5e6092fc7848d 100644
--- a/test/CodeGen/Mips/2008-06-05-Carry.ll
+++ b/test/CodeGen/Mips/2008-06-05-Carry.ll
@@ -2,20 +2,21 @@
define i64 @add64(i64 %u, i64 %v) nounwind {
entry:
+; CHECK-LABEL: add64:
; CHECK: addu
-; CHECK: sltu
+; CHECK-DAG: sltu
+; CHECK-DAG: addu
; CHECK: addu
-; CHECK: addu
- %tmp2 = add i64 %u, %v
+ %tmp2 = add i64 %u, %v
ret i64 %tmp2
}
define i64 @sub64(i64 %u, i64 %v) nounwind {
entry:
-; CHECK: sub64
+; CHECK-LABEL: sub64
+; CHECK-DAG: sltu
+; CHECK-DAG: subu
; CHECK: subu
-; CHECK: sltu
-; CHECK: addu
; CHECK: subu
%tmp2 = sub i64 %u, %v
ret i64 %tmp2
diff --git a/test/CodeGen/Mips/dins.ll b/test/CodeGen/Mips/dins.ll
index 5c04157592660..2aa824250d3b4 100644
--- a/test/CodeGen/Mips/dins.ll
+++ b/test/CodeGen/Mips/dins.ll
@@ -59,9 +59,9 @@ entry:
; CHECK-LABEL: f123:
; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 123
; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 27, 37
-; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 5
; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 4
; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 28, 6
+; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 5
; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 50, 14
; MIPS64R2: dsrl $[[R0:[0-9]+]], $[[R1:[0-9]+]], 50
; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 34, 16
@@ -94,4 +94,4 @@ entry:
; MIPS32R2: ori $[[R0:[0-9]+]], $[[R0:[0-9]+]], 8
; MIPS32R2-NOT: ins {{[[:space:]].*}}
; MIPS64R2N32: ori $[[R0:[0-9]+]], $[[R0:[0-9]+]], 8
-; MIPS64R2N32-NOT: ins {{[[:space:]].*}} \ No newline at end of file
+; MIPS64R2N32-NOT: ins {{[[:space:]].*}}
diff --git a/test/CodeGen/Mips/dsp-patterns.ll b/test/CodeGen/Mips/dsp-patterns.ll
index 837c0d8bfc52b..250d3eff37dc5 100644
--- a/test/CodeGen/Mips/dsp-patterns.ll
+++ b/test/CodeGen/Mips/dsp-patterns.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=R1
-; RUN: llc -march=mips -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dsp < %s | FileCheck %s -check-prefix=R1
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
; R1-LABEL: test_lbux:
; R1: lbux ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/llcarry.ll b/test/CodeGen/Mips/llcarry.ll
index fcf129420234c..b7cc6fc8ea757 100644
--- a/test/CodeGen/Mips/llcarry.ll
+++ b/test/CodeGen/Mips/llcarry.ll
@@ -14,9 +14,9 @@ entry:
%add = add nsw i64 %1, %0
store i64 %add, i64* @k, align 8
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $t8
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
+; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
+; 16: move ${{[0-9]+}}, $24
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
ret void
}
@@ -28,8 +28,8 @@ entry:
%sub = sub nsw i64 %0, %1
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $t8
-; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
+; 16: move ${{[0-9]+}}, $24
+; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
store i64 %sub, i64* @l, align 8
ret void
@@ -41,8 +41,7 @@ entry:
%add = add nsw i64 %0, 15
; 16: addiu ${{[0-9]+}}, 15
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $t8
-; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
+; 16: move ${{[0-9]+}}, $24
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
store i64 %add, i64* @m, align 8
ret void
diff --git a/test/CodeGen/Mips/llvm-ir/add.ll b/test/CodeGen/Mips/llvm-ir/add.ll
index a5ecdda94ce2f..63884eb03b8c5 100644
--- a/test/CodeGen/Mips/llvm-ir/add.ll
+++ b/test/CodeGen/Mips/llvm-ir/add.ll
@@ -1,35 +1,35 @@
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32,PRE4
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32,GP32-CMOV
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32
+; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32
+; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32
+; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64
+; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64
+; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64
+; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64
+; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -O2 -verify-machineinstrs | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR6,MM32
+; RUN: -check-prefixes=ALL,MMR3,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -O2 | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -O2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR6,MM64
+; RUN: -check-prefixes=ALL,MM64
; FIXME: This code sequence is inefficient as it should be 'subu $[[T0]], $zero, $[[T0]'.
@@ -110,17 +110,17 @@ define signext i64 @add_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: add_i64:
- ; GP32: addu $3, $5, $7
- ; GP32: sltu $[[T0:[0-9]+]], $3, $7
- ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
- ; GP32: addu $2, $4, $[[T1]]
+ ; GP32-DAG: addu $[[T0:[0-9]+]], $4, $6
+ ; GP32-DAG: addu $3, $5, $7
+ ; GP32: sltu $[[T1:[0-9]+]], $3, $5
+ ; GP32: addu $2, $[[T0]], $[[T1]]
; GP64: daddu $2, $4, $5
- ; MM32: addu16 $3, $5, $7
- ; MM32: sltu $[[T0:[0-9]+]], $3, $7
- ; MM32: addu $[[T1:[0-9]+]], $[[T0]], $6
- ; MM32: addu $2, $4, $[[T1]]
+ ; MM32-DAG: addu16 $3, $5, $7
+ ; MM32-DAG: addu16 $[[T0:[0-9]+]], $4, $6
+ ; MM32: sltu $[[T1:[0-9]+]], $3, $5
+ ; MM32: addu16 $2, $[[T0]], $[[T1]]
; MM64: daddu $2, $4, $5
@@ -132,49 +132,108 @@ define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: add_i128:
- ; GP32: lw $[[T0:[0-9]+]], 28($sp)
- ; GP32: addu $[[T1:[0-9]+]], $7, $[[T0]]
- ; GP32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
- ; GP32: lw $[[T3:[0-9]+]], 24($sp)
- ; GP32: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; GP32: addu $[[T5:[0-9]+]], $6, $[[T4]]
- ; GP32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
- ; GP32: lw $[[T7:[0-9]+]], 20($sp)
- ; GP32: addu $[[T8:[0-9]+]], $[[T6]], $[[T7]]
- ; GP32: lw $[[T9:[0-9]+]], 16($sp)
- ; GP32: addu $3, $5, $[[T8]]
- ; GP32: sltu $[[T10:[0-9]+]], $3, $[[T7]]
- ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T9]]
- ; GP32: addu $2, $4, $[[T11]]
- ; GP32: move $4, $[[T5]]
- ; GP32: move $5, $[[T1]]
-
- ; GP64: daddu $3, $5, $7
- ; GP64: sltu $[[T0:[0-9]+]], $3, $7
- ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
- ; GP64: daddu $2, $4, $[[T1]]
-
- ; MM32: lw $[[T0:[0-9]+]], 28($sp)
- ; MM32: addu $[[T1:[0-9]+]], $7, $[[T0]]
- ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
- ; MM32: lw $[[T3:[0-9]+]], 24($sp)
- ; MM32: addu16 $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; MM32: addu16 $[[T5:[0-9]+]], $6, $[[T4]]
- ; MM32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
- ; MM32: lw $[[T7:[0-9]+]], 20($sp)
- ; MM32: addu16 $[[T8:[0-9]+]], $[[T6]], $[[T7]]
- ; MM32: lw $[[T9:[0-9]+]], 16($sp)
- ; MM32: addu16 $[[T10:[0-9]+]], $5, $[[T8]]
- ; MM32: sltu $[[T11:[0-9]+]], $[[T10]], $[[T7]]
- ; MM32: addu $[[T12:[0-9]+]], $[[T11]], $[[T9]]
- ; MM32: addu16 $[[T13:[0-9]+]], $4, $[[T12]]
- ; MM32: move $4, $[[T5]]
- ; MM32: move $5, $[[T1]]
-
+ ; PRE4: move $[[R1:[0-9]+]], $5
+ ; PRE4: move $[[R2:[0-9]+]], $4
+ ; PRE4: lw $[[R3:[0-9]+]], 24($sp)
+ ; PRE4: addu $[[R4:[0-9]+]], $6, $[[R3]]
+ ; PRE4: lw $[[R5:[0-9]+]], 28($sp)
+ ; PRE4: addu $[[R6:[0-9]+]], $7, $[[R5]]
+ ; PRE4: sltu $[[R7:[0-9]+]], $[[R6]], $7
+ ; PRE4: addu $[[R8:[0-9]+]], $[[R4]], $[[R7]]
+ ; PRE4: xor $[[R9:[0-9]+]], $[[R8]], $6
+ ; PRE4: sltiu $[[R10:[0-9]+]], $[[R9]], 1
+ ; PRE4: bnez $[[R10]], $BB5_2
+ ; PRE4: sltu $[[R7]], $[[R8]], $6
+ ; PRE4: lw $[[R12:[0-9]+]], 20($sp)
+ ; PRE4: addu $[[R13:[0-9]+]], $[[R1]], $[[R12]]
+ ; PRE4: lw $[[R14:[0-9]+]], 16($sp)
+ ; PRE4: addu $[[R15:[0-9]+]], $[[R13]], $[[R7]]
+ ; PRE4: addu $[[R16:[0-9]+]], $[[R2]], $[[R14]]
+ ; PRE4: sltu $[[R17:[0-9]+]], $[[R15]], $[[R13]]
+ ; PRE4: sltu $[[R18:[0-9]+]], $[[R13]], $[[R1]]
+ ; PRE4: addu $[[R19:[0-9]+]], $[[R16]], $[[R18]]
+ ; PRE4: addu $2, $[[R19]], $[[R17]]
+
+ ; GP32-CMOV: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32-CMOV: addu $[[T1:[0-9]+]], $6, $[[T0]]
+ ; GP32-CMOV: lw $[[T2:[0-9]+]], 28($sp)
+ ; GP32-CMOV: addu $[[T3:[0-9]+]], $7, $[[T2]]
+ ; GP32-CMOV: sltu $[[T4:[0-9]+]], $[[T3]], $7
+ ; GP32-CMOV: addu $[[T5:[0-9]+]], $[[T1]], $[[T4]]
+ ; GP32-CMOV: sltu $[[T6:[0-9]+]], $[[T5]], $6
+ ; GP32-CMOV: xor $[[T7:[0-9]+]], $[[T5]], $6
+ ; GP32-CMOV: movz $[[T8:[0-9]+]], $[[T4]], $[[T7]]
+ ; GP32-CMOV: lw $[[T9:[0-9]+]], 20($sp)
+ ; GP32-CMOV: addu $[[T10:[0-9]+]], $5, $[[T4]]
+ ; GP32-CMOV: addu $[[T11:[0-9]+]], $[[T10]], $[[T8]]
+ ; GP32-CMOV: lw $[[T12:[0-9]+]], 16($sp)
+ ; GP32-CMOV: sltu $[[T13:[0-9]+]], $[[T11]], $[[T10]]
+ ; GP32-CMOV: addu $[[T14:[0-9]+]], $4, $[[T12]]
+ ; GP32-CMOV: sltu $[[T15:[0-9]+]], $[[T10]], $5
+ ; GP32-CMOV: addu $[[T16:[0-9]+]], $[[T14]], $[[T15]]
+ ; GP32-CMOV: addu $[[T17:[0-9]+]], $[[T16]], $[[T13]]
+ ; GP32-CMOV: move $4, $[[T5]]
+ ; GP32-CMOV: move $5, $[[T3]]
+
+ ; GP64: daddu $[[T0:[0-9]+]], $4, $6
+ ; GP64: daddu $[[T1:[0-9]+]], $5, $7
+ ; GP64: sltu $[[T2:[0-9]+]], $[[T1]], $5
+ ; GP64-NOT-R2-R6: dsll $[[T3:[0-9]+]], $[[T2]], 32
+ ; GP64-NOT-R2-R6: dsrl $[[T4:[0-9]+]], $[[T3]], 32
+ ; GP64-R2-R6: dext $[[T4:[0-9]+]], $[[T2]], 0, 32
+
+ ; GP64: daddu $2, $[[T0]], $[[T4]]
+
+ ; MMR3: move $[[T1:[0-9]+]], $5
+ ; MMR3-DAG: lw $[[T2:[0-9]+]], 32($sp)
+ ; MMR3: addu16 $[[T3:[0-9]+]], $6, $[[T2]]
+ ; MMR3-DAG: lw $[[T4:[0-9]+]], 36($sp)
+ ; MMR3: addu16 $[[T5:[0-9]+]], $7, $[[T4]]
+ ; MMR3: sltu $[[T6:[0-9]+]], $[[T5]], $7
+ ; MMR3: addu16 $[[T7:[0-9]+]], $[[T3]], $[[T6]]
+ ; MMR3: sltu $[[T8:[0-9]+]], $[[T7]], $6
+ ; MMR3: xor $[[T9:[0-9]+]], $[[T7]], $6
+ ; MMR3: movz $[[T8]], $[[T6]], $[[T9]]
+ ; MMR3: lw $[[T10:[0-9]+]], 28($sp)
+ ; MMR3: addu16 $[[T11:[0-9]+]], $[[T1]], $[[T10]]
+ ; MMR3: addu16 $[[T12:[0-9]+]], $[[T11]], $[[T8]]
+ ; MMR3: lw $[[T13:[0-9]+]], 24($sp)
+ ; MMR3: sltu $[[T14:[0-9]+]], $[[T12]], $[[T11]]
+ ; MMR3: addu16 $[[T15:[0-9]+]], $4, $[[T13]]
+ ; MMR3: sltu $[[T16:[0-9]+]], $[[T11]], $[[T1]]
+ ; MMR3: addu16 $[[T17:[0-9]+]], $[[T15]], $[[T16]]
+ ; MMR3: addu16 $2, $2, $[[T14]]
+
+ ; MMR6: move $[[T1:[0-9]+]], $5
+ ; MMR6: move $[[T2:[0-9]+]], $4
+ ; MMR6: lw $[[T3:[0-9]+]], 32($sp)
+ ; MMR6: addu16 $[[T4:[0-9]+]], $6, $[[T3]]
+ ; MMR6: lw $[[T5:[0-9]+]], 36($sp)
+ ; MMR6: addu16 $[[T6:[0-9]+]], $7, $[[T5]]
+ ; MMR6: sltu $[[T7:[0-9]+]], $[[T6]], $7
+ ; MMR6: addu16 $[[T8:[0-9]+]], $[[T4]], $7
+ ; MMR6: sltu $[[T9:[0-9]+]], $[[T8]], $6
+ ; MMR6: xor $[[T10:[0-9]+]], $[[T4]], $6
+ ; MMR6: sltiu $[[T11:[0-9]+]], $[[T10]], 1
+ ; MMR6: seleqz $[[T12:[0-9]+]], $[[T9]], $[[T11]]
+ ; MMR6: selnez $[[T13:[0-9]+]], $[[T7]], $[[T11]]
+ ; MMR6: lw $[[T14:[0-9]+]], 24($sp)
+ ; MMR6: or $[[T15:[0-9]+]], $[[T13]], $[[T12]]
+ ; MMR6: addu16 $[[T16:[0-9]+]], $[[T2]], $[[T14]]
+ ; MMR6: lw $[[T17:[0-9]+]], 28($sp)
+ ; MMR6: addu16 $[[T18:[0-9]+]], $[[T1]], $[[T17]]
+ ; MMR6: addu16 $[[T19:[0-9]+]], $[[T18]], $[[T15]]
+ ; MMR6: sltu $[[T20:[0-9]+]], $[[T18]], $[[T1]]
+ ; MMR6: sltu $[[T21:[0-9]+]], $[[T17]], $[[T18]]
+ ; MMR6: addu16 $2, $[[T16]], $[[T20]]
+ ; MMR6: addu16 $2, $[[T20]], $[[T21]]
+
+ ; MM64: daddu $[[T0:[0-9]+]], $4, $6
; MM64: daddu $3, $5, $7
- ; MM64: sltu $[[T0:[0-9]+]], $3, $7
- ; MM64: daddu $[[T1:[0-9]+]], $[[T0]], $6
- ; MM64: daddu $2, $4, $[[T1]]
+ ; MM64: sltu $[[T1:[0-9]+]], $3, $5
+ ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
+ ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+ ; MM64: daddu $2, $[[T0]], $[[T3]]
%r = add i128 %a, %b
ret i128 %r
@@ -249,17 +308,16 @@ define signext i32 @add_i32_4(i32 signext %a) {
define signext i64 @add_i64_4(i64 signext %a) {
; ALL-LABEL: add_i64_4:
- ; GP32: addiu $[[T0:[0-9]+]], $5, 4
- ; GP32: addiu $[[T1:[0-9]+]], $zero, 4
- ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
- ; GP32: addu $2, $4, $[[T1]]
+ ; GP32: addiu $3, $5, 4
+ ; GP32: sltu $[[T0:[0-9]+]], $3, $5
+ ; GP32: addu $2, $4, $[[T0]]
+
+ ; MM32: addiur2 $[[T1:[0-9]+]], $5, 4
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
+ ; MM32: addu16 $2, $4, $[[T2]]
; GP64: daddiu $2, $4, 4
- ; MM32: addiu $[[T0:[0-9]+]], $5, 4
- ; MM32: li16 $[[T1:[0-9]+]], 4
- ; MM32: sltu $[[T2:[0-9]+]], $[[T0]], $[[T1]]
- ; MM32: addu $2, $4, $[[T2]]
; MM64: daddiu $2, $4, 4
@@ -270,38 +328,67 @@ define signext i64 @add_i64_4(i64 signext %a) {
define signext i128 @add_i128_4(i128 signext %a) {
; ALL-LABEL: add_i128_4:
- ; GP32: addiu $[[T0:[0-9]+]], $7, 4
- ; GP32: addiu $[[T1:[0-9]+]], $zero, 4
- ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
- ; GP32: addu $[[T2:[0-9]+]], $6, $[[T1]]
- ; GP32: sltu $[[T1]], $[[T2]], $zero
- ; GP32: addu $[[T3:[0-9]+]], $5, $[[T1]]
- ; GP32: sltu $[[T1]], $[[T3]], $zero
- ; GP32: addu $[[T1]], $4, $[[T1]]
- ; GP32: move $4, $[[T2]]
- ; GP32: move $5, $[[T0]]
-
- ; GP64: daddiu $[[T0:[0-9]+]], $5, 4
- ; GP64: daddiu $[[T1:[0-9]+]], $zero, 4
- ; GP64: sltu $[[T1]], $[[T0]], $[[T1]]
- ; GP64: daddu $2, $4, $[[T1]]
-
- ; MM32: addiu $[[T0:[0-9]+]], $7, 4
- ; MM32: li16 $[[T1:[0-9]+]], 4
- ; MM32: sltu $[[T1]], $[[T0]], $[[T1]]
- ; MM32: addu16 $[[T2:[0-9]+]], $6, $[[T1]]
- ; MM32: li16 $[[T1]], 0
- ; MM32: sltu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
- ; MM32: addu16 $[[T3]], $5, $[[T3]]
- ; MM32: sltu $[[T1]], $[[T3]], $[[T1]]
- ; MM32: addu16 $[[T1]], $4, $[[T1]]
- ; MM32: move $4, $[[T2]]
- ; MM32: move $5, $[[T0]]
+ ; PRE4: move $[[T0:[0-9]+]], $5
+ ; PRE4: addiu $[[T1:[0-9]+]], $7, 4
+ ; PRE4: sltu $[[T2:[0-9]+]], $[[T1]], $7
+ ; PRE4: xori $[[T3:[0-9]+]], $[[T2]], 1
+ ; PRE4: bnez $[[T3]], $BB[[BB0:[0-9_]+]]
+ ; PRE4: addu $[[T4:[0-9]+]], $6, $[[T2]]
+ ; PRE4: sltu $[[T5:[0-9]+]], $[[T4]], $6
+ ; PRE4: $BB[[BB0]]:
+ ; PRE4: addu $[[T6:[0-9]+]], $[[T0]], $[[T5]]
+ ; PRE4: sltu $[[T7:[0-9]+]], $[[T6]], $[[T0]]
+ ; PRE4: addu $[[T8:[0-9]+]], $4, $[[T7]]
+ ; PRE4: move $4, $[[T4]]
+
+ ; GP32-CMOV: addiu $[[T0:[0-9]+]], $7, 4
+ ; GP32-CMOV: sltu $[[T1:[0-9]+]], $[[T0]], $7
+ ; GP32-CMOV: addu $[[T2:[0-9]+]], $6, $[[T1]]
+ ; GP32-CMOV: sltu $[[T3:[0-9]+]], $[[T2]], $6
+ ; GP32-CMOV: movz $[[T3]], $[[T1]], $[[T1]]
+ ; GP32-CMOV: addu $[[T4:[0-9]+]], $5, $[[T3]]
+ ; GP32-CMOV: sltu $[[T5:[0-9]+]], $[[T4]], $5
+ ; GP32-CMOV: addu $[[T7:[0-9]+]], $4, $[[T5]]
+ ; GP32-CMOV: move $4, $[[T2]]
+ ; GP32-CMOV: move $5, $[[T0]]
+
+ ; GP64: daddiu $[[T0:[0-9]+]], $5, 4
+ ; GP64: sltu $[[T1:[0-9]+]], $[[T0]], $5
+ ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 32
+ ; GP64-NOT-R2-R6: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+ ; GP64-R2-R6: dext $[[T3:[0-9]+]], $[[T1]], 0, 32
+
+ ; GP64: daddu $2, $4, $[[T3]]
+
+ ; MMR3: addiur2 $[[T0:[0-9]+]], $7, 4
+ ; MMR3: sltu $[[T1:[0-9]+]], $[[T0]], $7
+ ; MMR3: sltu $[[T2:[0-9]+]], $[[T0]], $7
+ ; MMR3: addu16 $[[T3:[0-9]+]], $6, $[[T2]]
+ ; MMR3: sltu $[[T4:[0-9]+]], $[[T3]], $6
+ ; MMR3: movz $[[T4]], $[[T2]], $[[T1]]
+ ; MMR3: addu16 $[[T6:[0-9]+]], $5, $[[T4]]
+ ; MMR3: sltu $[[T7:[0-9]+]], $[[T6]], $5
+ ; MMR3: addu16 $2, $4, $[[T7]]
+
+ ; MMR6: addiur2 $[[T1:[0-9]+]], $7, 4
+ ; MMR6: sltu $[[T2:[0-9]+]], $[[T1]], $7
+ ; MMR6: xori $[[T3:[0-9]+]], $[[T2]], 1
+ ; MMR6: selnez $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; MMR6: addu16 $[[T5:[0-9]+]], $6, $[[T2]]
+ ; MMR6: sltu $[[T6:[0-9]+]], $[[T5]], $6
+ ; MMR6: seleqz $[[T7:[0-9]+]], $[[T6]], $[[T3]]
+ ; MMR6: or $[[T8:[0-9]+]], $[[T4]], $[[T7]]
+ ; MMR6: addu16 $[[T9:[0-9]+]], $5, $[[T8]]
+ ; MMR6: sltu $[[T10:[0-9]+]], $[[T9]], $5
+ ; MMR6: addu16 $[[T11:[0-9]+]], $4, $[[T10]]
+ ; MMR6: move $4, $7
+ ; MMR6: move $5, $[[T1]]
; MM64: daddiu $[[T0:[0-9]+]], $5, 4
- ; MM64: daddiu $[[T1:[0-9]+]], $zero, 4
- ; MM64: sltu $[[T1]], $[[T0]], $[[T1]]
- ; MM64: daddu $2, $4, $[[T1]]
+ ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
+ ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
+ ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+ ; MM64: daddu $2, $4, $[[T3]]
%r = add i128 4, %a
ret i128 %r
@@ -380,16 +467,15 @@ define signext i64 @add_i64_3(i64 signext %a) {
; ALL-LABEL: add_i64_3:
; GP32: addiu $[[T0:[0-9]+]], $5, 3
- ; GP32: addiu $[[T1:[0-9]+]], $zero, 3
- ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: sltu $[[T1:[0-9]+]], $[[T0]], $5
; GP32: addu $2, $4, $[[T1]]
; GP64: daddiu $2, $4, 3
- ; MM32: addiu $[[T0:[0-9]+]], $5, 3
- ; MM32: li16 $[[T1:[0-9]+]], 3
- ; MM32: sltu $[[T2:[0-9]+]], $[[T0]], $[[T1]]
- ; MM32: addu $2, $4, $[[T2]]
+ ; MM32: move $[[T1:[0-9]+]], $5
+ ; MM32: addius5 $[[T1]], 3
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
+ ; MM32: addu16 $2, $4, $[[T2]]
; MM64: daddiu $2, $4, 3
@@ -400,38 +486,70 @@ define signext i64 @add_i64_3(i64 signext %a) {
define signext i128 @add_i128_3(i128 signext %a) {
; ALL-LABEL: add_i128_3:
- ; GP32: addiu $[[T0:[0-9]+]], $7, 3
- ; GP32: addiu $[[T1:[0-9]+]], $zero, 3
- ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
- ; GP32: addu $[[T2:[0-9]+]], $6, $[[T1]]
- ; GP32: sltu $[[T3:[0-9]+]], $[[T2]], $zero
- ; GP32: addu $[[T4:[0-9]+]], $5, $[[T3]]
- ; GP32: sltu $[[T5:[0-9]+]], $[[T4]], $zero
- ; GP32: addu $[[T5]], $4, $[[T5]]
- ; GP32: move $4, $[[T2]]
- ; GP32: move $5, $[[T0]]
-
- ; GP64: daddiu $[[T0:[0-9]+]], $5, 3
- ; GP64: daddiu $[[T1:[0-9]+]], $zero, 3
- ; GP64: sltu $[[T1]], $[[T0]], $[[T1]]
- ; GP64: daddu $2, $4, $[[T1]]
-
- ; MM32: addiu $[[T0:[0-9]+]], $7, 3
- ; MM32: li16 $[[T1:[0-9]+]], 3
- ; MM32: sltu $[[T1]], $[[T0]], $[[T1]]
- ; MM32: addu16 $[[T2:[0-9]+]], $6, $[[T1]]
- ; MM32: li16 $[[T3:[0-9]+]], 0
- ; MM32: sltu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; MM32: addu16 $[[T4]], $5, $[[T4]]
- ; MM32: sltu $[[T5:[0-9]+]], $[[T4]], $[[T3]]
- ; MM32: addu16 $[[T5]], $4, $[[T5]]
- ; MM32: move $4, $[[T2]]
- ; MM32: move $5, $[[T0]]
+ ; PRE4: move $[[T0:[0-9]+]], $5
+ ; PRE4: addiu $[[T1:[0-9]+]], $7, 3
+ ; PRE4: sltu $[[T2:[0-9]+]], $[[T1]], $7
+ ; PRE4: xori $[[T3:[0-9]+]], $[[T2]], 1
+ ; PRE4: bnez $[[T3]], $BB[[BB0:[0-9_]+]]
+ ; PRE4: addu $[[T4:[0-9]+]], $6, $[[T2]]
+ ; PRE4: sltu $[[T5:[0-9]+]], $[[T4]], $6
+ ; PRE4: $BB[[BB0]]:
+ ; PRE4: addu $[[T6:[0-9]+]], $[[T0]], $[[T5]]
+ ; PRE4: sltu $[[T7:[0-9]+]], $[[T6]], $[[T0]]
+ ; PRE4: addu $[[T8:[0-9]+]], $4, $[[T7]]
+ ; PRE4: move $4, $[[T4]]
+
+ ; GP32-CMOV: addiu $[[T0:[0-9]+]], $7, 3
+ ; GP32-CMOV: sltu $[[T1:[0-9]+]], $[[T0]], $7
+ ; GP32-CMOV: addu $[[T2:[0-9]+]], $6, $[[T1]]
+ ; GP32-CMOV: sltu $[[T3:[0-9]+]], $[[T2]], $6
+ ; GP32-CMOV: movz $[[T3]], $[[T1]], $[[T1]]
+ ; GP32-CMOV: addu $[[T4:[0-9]+]], $5, $[[T3]]
+ ; GP32-CMOV: sltu $[[T5:[0-9]+]], $[[T4]], $5
+ ; GP32-CMOV: addu $[[T7:[0-9]+]], $4, $[[T5]]
+ ; GP32-CMOV: move $4, $[[T2]]
+ ; GP32-CMOV: move $5, $[[T0]]
+
+ ; GP64: daddiu $[[T0:[0-9]+]], $5, 3
+ ; GP64: sltu $[[T1:[0-9]+]], $[[T0]], $5
+
+ ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 32
+ ; GP64-NOT-R2-R6: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+ ; GP64-R2-R6: dext $[[T3:[0-9]+]], $[[T1]], 0, 32
+
+ ; GP64: daddu $2, $4, $[[T3]]
+
+ ; MMR3: move $[[T1:[0-9]+]], $7
+ ; MMR3: addius5 $[[T1]], 3
+ ; MMR3: sltu $[[T2:[0-9]+]], $[[T1]], $7
+ ; MMR3: sltu $[[T3:[0-9]+]], $[[T1]], $7
+ ; MMR3: addu16 $[[T4:[0-9]+]], $6, $[[T3]]
+ ; MMR3: sltu $[[T5:[0-9]+]], $[[T4]], $6
+ ; MMR3: movz $[[T5]], $[[T3]], $[[T2]]
+ ; MMR3: addu16 $[[T6:[0-9]+]], $5, $[[T5]]
+ ; MMR3: sltu $[[T7:[0-9]+]], $[[T6]], $5
+ ; MMR3: addu16 $2, $4, $[[T7]]
+
+ ; MMR6: move $[[T1:[0-9]+]], $7
+ ; MMR6: addius5 $[[T1]], 3
+ ; MMR6: sltu $[[T2:[0-9]+]], $[[T1]], $7
+ ; MMR6: xori $[[T3:[0-9]+]], $[[T2]], 1
+ ; MMR6: selnez $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; MMR6: addu16 $[[T5:[0-9]+]], $6, $[[T2]]
+ ; MMR6: sltu $[[T6:[0-9]+]], $[[T5]], $6
+ ; MMR6: seleqz $[[T7:[0-9]+]], $[[T6]], $[[T3]]
+ ; MMR6: or $[[T8:[0-9]+]], $[[T4]], $[[T7]]
+ ; MMR6: addu16 $[[T9:[0-9]+]], $5, $[[T8]]
+ ; MMR6: sltu $[[T10:[0-9]+]], $[[T9]], $5
+ ; MMR6: addu16 $[[T11:[0-9]+]], $4, $[[T10]]
+ ; MMR6: move $4, $[[T5]]
+ ; MMR6: move $5, $[[T1]]
; MM64: daddiu $[[T0:[0-9]+]], $5, 3
- ; MM64: daddiu $[[T1:[0-9]+]], $zero, 3
- ; MM64: sltu $[[T1]], $[[T0]], $[[T1]]
- ; MM64: daddu $2, $4, $[[T1]]
+ ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
+ ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
+ ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+ ; MM64: daddu $2, $4, $[[T3]]
%r = add i128 3, %a
ret i128 %r
diff --git a/test/CodeGen/Mips/llvm-ir/sub.ll b/test/CodeGen/Mips/llvm-ir/sub.ll
index a730063c552f4..655addb10a64e 100644
--- a/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
+; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM,PRE4
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
@@ -11,25 +11,25 @@
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN: -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -verify-machineinstrs | FileCheck %s \
-; RUN: -check-prefixes=GP32-MM,GP32,MM
+; RUN: -check-prefixes=GP32-MM,GP32,MM32,MMR3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP32-MM,GP32,MM
+; RUN: -check-prefixes=GP32-MM,GP32,MM32,MMR6
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP64,MM
+; RUN: -check-prefixes=GP64,MM64
define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -100,10 +100,15 @@ define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: sub_i64:
- ; GP32-NOT-MM subu $3, $5, $7
- ; GP32: sltu $[[T0:[0-9]+]], $5, $7
- ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
- ; GP32: subu $2, $4, $[[T1]]
+ ; GP32-NOT-MM: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP32-NOT-MM: subu $2, $4, $6
+ ; GP32-NOT-MM: subu $2, $2, $[[T0]]
+ ; GP32-NOT-MM: subu $3, $5, $7
+
+ ; MM32: sltu $[[T0:[0-9]+]], $5, $7
+ ; MM32: subu16 $3, $4, $6
+ ; MM32: subu16 $2, $3, $[[T0]]
+ ; MM32: subu16 $3, $5, $7
; GP64: dsubu $2, $4, $5
@@ -115,42 +120,109 @@ define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: sub_i128:
- ; GP32-NOT-MM: lw $[[T0:[0-9]+]], 20($sp)
- ; GP32-NOT-MM: sltu $[[T1:[0-9]+]], $5, $[[T0]]
- ; GP32-NOT-MM: lw $[[T2:[0-9]+]], 16($sp)
- ; GP32-NOT-MM: addu $[[T3:[0-9]+]], $[[T1]], $[[T2]]
- ; GP32-NOT-MM: lw $[[T4:[0-9]+]], 24($sp)
- ; GP32-NOT-MM: lw $[[T5:[0-9]+]], 28($sp)
- ; GP32-NOT-MM: subu $[[T6:[0-9]+]], $7, $[[T5]]
- ; GP32-NOT-MM: subu $2, $4, $[[T3]]
- ; GP32-NOT-MM: sltu $[[T8:[0-9]+]], $6, $[[T4]]
- ; GP32-NOT-MM: addu $[[T9:[0-9]+]], $[[T8]], $[[T0]]
- ; GP32-NOT-MM: subu $3, $5, $[[T9]]
- ; GP32-NOT-MM: sltu $[[T10:[0-9]+]], $7, $[[T5]]
- ; GP32-NOT-MM: addu $[[T11:[0-9]+]], $[[T10]], $[[T4]]
- ; GP32-NOT-MM: subu $4, $6, $[[T11]]
- ; GP32-NOT-MM: move $5, $[[T6]]
-
- ; GP32-MM: lw $[[T0:[0-9]+]], 20($sp)
- ; GP32-MM: sltu $[[T1:[0-9]+]], $[[T2:[0-9]+]], $[[T0]]
- ; GP32-MM: lw $[[T3:[0-9]+]], 16($sp)
- ; GP32-MM: addu $[[T3]], $[[T1]], $[[T3]]
- ; GP32-MM: lw $[[T4:[0-9]+]], 24($sp)
- ; GP32-MM: lw $[[T5:[0-9]+]], 28($sp)
- ; GP32-MM: subu $[[T1]], $7, $[[T5]]
- ; GP32-MM: subu16 $[[T3]], $[[T6:[0-9]+]], $[[T3]]
- ; GP32-MM: sltu $[[T6]], $6, $[[T4]]
- ; GP32-MM: addu16 $[[T0]], $[[T6]], $[[T0]]
- ; GP32-MM: subu16 $[[T0]], $5, $[[T0]]
- ; GP32-MM: sltu $[[T6]], $7, $[[T5]]
- ; GP32-MM: addu $[[T6]], $[[T6]], $[[T4]]
- ; GP32-MM: subu16 $[[T6]], $6, $[[T6]]
- ; GP32-MM: move $[[T2]], $[[T1]]
-
- ; GP64: dsubu $3, $5, $7
- ; GP64: sltu $[[T0:[0-9]+]], $5, $7
- ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
- ; GP64: dsubu $2, $4, $[[T1]]
+; PRE4: lw $[[T0:[0-9]+]], 24($sp)
+; PRE4: lw $[[T1:[0-9]+]], 28($sp)
+; PRE4: sltu $[[T2:[0-9]+]], $7, $[[T1]]
+; PRE4: xor $[[T3:[0-9]+]], $6, $[[T0]]
+; PRE4: sltiu $[[T4:[0-9]+]], $[[T3]], 1
+; PRE4: bnez $[[T4]]
+; PRE4: move $[[T5:[0-9]+]], $[[T2]]
+; PRE4: sltu $[[T5]], $6, $[[T0]]
+
+; PRE4: lw $[[T6:[0-9]+]], 20($sp)
+; PRE4: subu $[[T7:[0-9]+]], $5, $[[T6]]
+; PRE4: subu $[[T8:[0-9]+]], $[[T7]], $[[T5]]
+; PRE4: sltu $[[T9:[0-9]+]], $[[T7]], $[[T5]]
+; PRE4: sltu $[[T10:[0-9]+]], $5, $[[T6]]
+; PRE4: lw $[[T11:[0-9]+]], 16($sp)
+; PRE4: subu $[[T12:[0-9]+]], $4, $[[T11]]
+; PRE4: subu $[[T13:[0-9]+]], $[[T12]], $[[T10]]
+; PRE4: subu $[[T14:[0-9]+]], $[[T13]], $[[T9]]
+; PRE4: subu $[[T15:[0-9]+]], $6, $[[T0]]
+; PRE4: subu $[[T16:[0-9]+]], $[[T15]], $[[T2]]
+; PRE4: subu $5, $7, $[[T1]]
+
+; MMR3: lw $[[T1:[0-9]+]], 48($sp)
+; MMR3: sltu $[[T2:[0-9]+]], $6, $[[T1]]
+; MMR3: xor $[[T3:[0-9]+]], $6, $[[T1]]
+; MMR3: lw $[[T4:[0-9]+]], 52($sp)
+; MMR3: sltu $[[T5:[0-9]+]], $7, $[[T4]]
+; MMR3: movz $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+; MMR3: lw $[[T7:[0-9]+]], 44($sp)
+; MMR3: subu16 $[[T8:[0-9]+]], $5, $[[T7]]
+; MMR3: subu16 $[[T9:[0-9]+]], $[[T8]], $[[T6]]
+; MMR3: sltu $[[T10:[0-9]+]], $[[T8]], $[[T2]]
+; MMR3: sltu $[[T11:[0-9]+]], $5, $[[T7]]
+; MMR3: lw $[[T12:[0-9]+]], 40($sp)
+; MMR3: lw $[[T13:[0-9]+]], 12($sp)
+; MMR3: subu16 $[[T14:[0-9]+]], $[[T13]], $[[T12]]
+; MMR3: subu16 $[[T15:[0-9]+]], $[[T14]], $[[T11]]
+; MMR3: subu16 $[[T16:[0-9]+]], $[[T15]], $[[T10]]
+; MMR3: subu16 $[[T17:[0-9]+]], $6, $[[T1]]
+; MMR3: subu16 $[[T18:[0-9]+]], $[[T17]], $7
+; MMR3: lw $[[T19:[0-9]+]], 8($sp)
+; MMR3: lw $[[T20:[0-9]+]], 0($sp)
+; MMR3: subu16 $5, $[[T19]], $[[T20]]
+
+; MMR6: move $[[T0:[0-9]+]], $7
+; MMR6: sw $[[T0]], 8($sp)
+; MMR6: move $[[T1:[0-9]+]], $5
+; MMR6: sw $4, 12($sp)
+; MMR6: lw $[[T2:[0-9]+]], 48($sp)
+; MMR6: sltu $[[T3:[0-9]+]], $6, $[[T2]]
+; MMR6: xor $[[T4:[0-9]+]], $6, $[[T2]]
+; MMR6: sltiu $[[T5:[0-9]+]], $[[T4]], 1
+; MMR6: seleqz $[[T6:[0-9]+]], $[[T3]], $[[T5]]
+; MMR6: lw $[[T7:[0-9]+]], 52($sp)
+; MMR6: sltu $[[T8:[0-9]+]], $[[T0]], $[[T7]]
+; MMR6: selnez $[[T9:[0-9]+]], $[[T8]], $[[T5]]
+; MMR6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]]
+; MMR6: lw $[[T11:[0-9]+]], 44($sp)
+; MMR6: subu16 $[[T12:[0-9]+]], $[[T1]], $[[T11]]
+; MMR6: subu16 $[[T13:[0-9]+]], $[[T12]], $[[T7]]
+; MMR6: sltu $[[T16:[0-9]+]], $[[T12]], $[[T7]]
+; MMR6: sltu $[[T17:[0-9]+]], $[[T1]], $[[T11]]
+; MMR6: lw $[[T18:[0-9]+]], 40($sp)
+; MMR6: lw $[[T19:[0-9]+]], 12($sp)
+; MMR6: subu16 $[[T20:[0-9]+]], $[[T19]], $[[T18]]
+; MMR6: subu16 $[[T21:[0-9]+]], $[[T20]], $[[T17]]
+; MMR6: subu16 $[[T22:[0-9]+]], $[[T21]], $[[T16]]
+; MMR6: subu16 $[[T23:[0-9]+]], $6, $[[T2]]
+; MMR6: subu16 $4, $[[T23]], $5
+; MMR6: lw $[[T24:[0-9]+]], 8($sp)
+; MMR6: lw $[[T25:[0-9]+]], 0($sp)
+; MMR6: subu16 $5, $[[T24]], $[[T25]]
+; MMR6: lw $3, 4($sp)
+
+; FIXME: The sltu, dsll, dsrl pattern here occurs when an i32 is zero
+; extended to 64 bits. Fortunately slt(i)(u) actually gives an i1.
+; These should be combined away.
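+;
+; A reduced illustration (hypothetical IR, not part of this test):
+;   %c = icmp ult i64 %a, %b   ; the comparison already yields an i1
+;   %e = zext i1 %c to i64     ; currently lowers to sltu + dsll + dsrl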
+
+; GP64-NOT-R2: dsubu $1, $4, $6
+; GP64-NOT-R2: sltu $[[T0:[0-9]+]], $5, $7
+; GP64-NOT-R2: dsll $[[T1:[0-9]+]], $[[T0]], 32
+; GP64-NOT-R2: dsrl $[[T2:[0-9]+]], $[[T1]], 32
+; GP64-NOT-R2: dsubu $2, $1, $[[T2]]
+; GP64-NOT-R2: dsubu $3, $5, $7
+
+; FIXME: Likewise for the sltu, dext here.
+
+; GP64-R2: dsubu $1, $4, $6
+; GP64-R2: sltu $[[T0:[0-9]+]], $5, $7
+; GP64-R2: dext $[[T1:[0-9]+]], $[[T0]], 0, 32
+; GP64-R2: dsubu $2, $1, $[[T1]]
+; GP64-R2: dsubu $3, $5, $7
+
+; FIXME: Again, redundant sign extension. Also, microMIPSR6 has the
+; dext instruction, which should be used here.
+
+; MM64: dsubu $[[T0:[0-9]+]], $4, $6
+; MM64: sltu $[[T1:[0-9]+]], $5, $7
+; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
+; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; MM64: dsubu $2, $[[T0]], $[[T3]]
+; MM64: dsubu $3, $5, $7
+; MM64: jr $ra
%r = sub i128 %a, %b
ret i128 %r
diff --git a/test/CodeGen/Mips/long-calls.ll b/test/CodeGen/Mips/long-calls.ll
new file mode 100644
index 0000000000000..8a95e9b9307d7
--- /dev/null
+++ b/test/CodeGen/Mips/long-calls.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=mips -mattr=-long-calls %s -o - \
+; RUN: | FileCheck -check-prefix=OFF %s
+; RUN: llc -march=mips -mattr=+long-calls,+noabicalls %s -o - \
+; RUN: | FileCheck -check-prefix=ON32 %s
+
+; RUN: llc -march=mips -mattr=+long-calls,-noabicalls %s -o - \
+; RUN: | FileCheck -check-prefix=OFF %s
+
+; RUN: llc -march=mips64 -target-abi n32 -mattr=-long-calls %s -o - \
+; RUN: | FileCheck -check-prefix=OFF %s
+; RUN: llc -march=mips64 -target-abi n32 -mattr=+long-calls,+noabicalls %s -o - \
+; RUN: | FileCheck -check-prefix=ON32 %s
+
+; RUN: llc -march=mips64 -target-abi n64 -mattr=-long-calls %s -o - \
+; RUN: | FileCheck -check-prefix=OFF %s
+; RUN: llc -march=mips64 -target-abi n64 -mattr=+long-calls,+noabicalls %s -o - \
+; RUN: | FileCheck -check-prefix=ON64 %s
+
+declare void @callee()
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
+
+@val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4
+
+define void @caller() {
+
+; Use the `jal` instruction with the R_MIPS_26 relocation.
+; OFF: jal callee
+; OFF: jal memset
+
+; Save the `callee` and `memset` addresses in the $25 register
+; and use `jalr` for the jumps.
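+; ($25 is the register MIPS conventionally reserves for the callee
+; address on indirect calls, which is presumably why the addresses are
+; materialized there.)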
+; ON32: lui $1, %hi(callee)
+; ON32: addiu $25, $1, %lo(callee)
+; ON32: jalr $25
+
+; ON32: addiu $1, $zero, %lo(memset)
+; ON32: lui $2, %hi(memset)
+; ON32: addu $25, $2, $1
+; ON32: jalr $25
+
+; ON64: lui $1, %highest(callee)
+; ON64: daddiu $1, $1, %higher(callee)
+; ON64: daddiu $1, $1, %hi(callee)
+; ON64: daddiu $25, $1, %lo(callee)
+; ON64: jalr $25
+
+; ON64: daddiu $1, $zero, %higher(memset)
+; ON64: lui $2, %highest(memset)
+; ON64: lui $2, %hi(memset)
+; ON64: daddiu $2, $zero, %lo(memset)
+; ON64: daddu $25, $1, $2
+; ON64: jalr $25
+
+ call void @callee()
+ call void @llvm.memset.p0i8.i32(i8* bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i32 4, i1 false)
+ ret void
+}
diff --git a/test/CodeGen/Mips/madd-msub.ll b/test/CodeGen/Mips/madd-msub.ll
index 7baba005a0729..3e1a2e8b97088 100644
--- a/test/CodeGen/Mips/madd-msub.ll
+++ b/test/CodeGen/Mips/madd-msub.ll
@@ -25,11 +25,11 @@
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
-; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
-; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
-; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: muh $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sra $[[T4:[0-9]+]], $6, 31
+; 32R6-DAG: addu $[[T5:[0-9]+]], $[[T3]], $[[T4]]
+; 32R6-DAG: addu $2, $[[T5]], $[[T2]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -71,7 +71,7 @@ entry:
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
; FIXME: There's a redundant move here. We should remove it
; 32R6-DAG: muhu $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $2, $[[T3]], $[[T2]]
@@ -109,10 +109,10 @@ entry:
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $7
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $7
-; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $6
-; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $1
+; 32R6-DAG: muh $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $6
+; 32R6-DAG: addu $2, $[[T4]], $[[T2]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -134,6 +134,17 @@ entry:
ret i64 %add
}
+; ALL-LABEL: madd4
+; ALL-NOT: madd ${{[0-9]+}}, ${{[0-9]+}}
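+; (A plain 32-bit mul+add has no 64-bit accumulation to exploit, so no
+; madd is expected; the same reasoning applies to msub4 below.)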
+
+define i32 @madd4(i32 %a, i32 %b, i32 %c) {
+entry:
+ %mul = mul nsw i32 %a, %b
+ %add = add nsw i32 %c, %mul
+
+ ret i32 %add
+}
+
; ALL-LABEL: msub1:
; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
@@ -148,13 +159,13 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T3:[0-9]+]], $6, $[[T1]]
-; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $[[T0]]
-; 32R6-DAG: sra $[[T5:[0-9]+]], $6, 31
-; 32R6-DAG: subu $2, $[[T5]], $[[T4]]
-; 32R6-DAG: subu $3, $6, $[[T1]]
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T1:[0-9]+]], $6, $[[T0]]
+; 32R6-DAG: muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
+; 32R6-DAG: subu $[[T4:[0-9]+]], $[[T3]], $[[T2]]
+; 32R6-DAG: subu $2, $[[T4]], $[[T1]]
+; 32R6-DAG: subu $3, $6, $[[T0]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -194,13 +205,12 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: muhu $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
-
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $6, $[[T1]]
-; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
-; 32R6-DAG: negu $2, $[[T3]]
-; 32R6-DAG: subu $3, $6, $[[T1]]
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T1:[0-9]+]], $6, $[[T0]]
+; 32R6-DAG: muhu $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: negu $[[T3:[0-9]+]], $[[T2]]
+; 32R6-DAG: subu $2, $[[T3]], $[[T1]]
+; 32R6-DAG: subu $3, $6, $[[T0]]
; 64-DAG: d[[m:m]]ult $5, $4
; 64-DAG: [[m]]flo $[[T0:[0-9]+]]
@@ -234,12 +244,12 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $7, $[[T1]]
-; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
-; 32R6-DAG: subu $2, $6, $[[T3]]
-; 32R6-DAG: subu $3, $7, $[[T1]]
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T1:[0-9]+]], $7, $[[T0]]
+; 32R6-DAG: muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: subu $[[T3:[0-9]+]], $6, $[[T2]]
+; 32R6-DAG: subu $2, $[[T3]], $[[T1]]
+; 32R6-DAG: subu $3, $7, $[[T0]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -260,3 +270,14 @@ entry:
%sub = sub nsw i64 %c, %mul
ret i64 %sub
}
+
+; ALL-LABEL: msub4
+; ALL-NOT: msub ${{[0-9]+}}, ${{[0-9]+}}
+
+define i32 @msub4(i32 %a, i32 %b, i32 %c) {
+entry:
+ %mul = mul nsw i32 %a, %b
+ %sub = sub nsw i32 %c, %mul
+
+ ret i32 %sub
+}
diff --git a/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/test/CodeGen/Mips/msa/f16-llvm-ir.ll
index ac69dc913c182..b3ed8bdd3b9a9 100644
--- a/test/CodeGen/Mips/msa/f16-llvm-ir.ll
+++ b/test/CodeGen/Mips/msa/f16-llvm-ir.ll
@@ -1,21 +1,21 @@
; RUN: llc -relocation-model=pic -march=mipsel -mcpu=mips32r5 \
-; RUN: -mattr=+fp64,+msa < %s | FileCheck %s \
+; RUN: -mattr=+fp64,+msa -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=ALL,MIPS32,MIPSR5,MIPS32-O32,MIPS32R5-O32
; RUN: llc -relocation-model=pic -march=mips64el -mcpu=mips64r5 \
-; RUN: -mattr=+fp64,+msa -target-abi n32 < %s | FileCheck %s \
+; RUN: -mattr=+fp64,+msa -verify-machineinstrs -target-abi n32 < %s | FileCheck %s \
; RUN: --check-prefixes=ALL,MIPS64,MIPSR5,MIPS64-N32,MIPS64R5-N32
; RUN: llc -relocation-model=pic -march=mips64el -mcpu=mips64r5 \
-; RUN: -mattr=+fp64,+msa -target-abi n64 < %s | FileCheck %s \
+; RUN: -mattr=+fp64,+msa -verify-machineinstrs -target-abi n64 < %s | FileCheck %s \
; RUN: --check-prefixes=ALL,MIPS64,MIPSR5,MIPS64-N64,MIPS64R5-N64
; RUN: llc -relocation-model=pic -march=mipsel -mcpu=mips32r6 \
-; RUN: -mattr=+fp64,+msa < %s | FileCheck %s \
+; RUN: -mattr=+fp64,+msa -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=ALL,MIPS32,MIPSR6,MIPSR6-O32
; RUN: llc -relocation-model=pic -march=mips64el -mcpu=mips64r6 \
-; RUN: -mattr=+fp64,+msa -target-abi n32 < %s | FileCheck %s \
+; RUN: -mattr=+fp64,+msa -verify-machineinstrs -target-abi n32 < %s | FileCheck %s \
; RUN: --check-prefixes=ALL,MIPS64,MIPSR6,MIPS64-N32,MIPSR6-N32
; RUN: llc -relocation-model=pic -march=mips64el -mcpu=mips64r6 \
-; RUN: -mattr=+fp64,+msa -target-abi n64 < %s | FileCheck %s \
+; RUN: -mattr=+fp64,+msa -verify-machineinstrs -target-abi n64 < %s | FileCheck %s \
; RUN: --check-prefixes=ALL,MIPS64,MIPSR6,MIPS64-N64,MIPSR6-N64
diff --git a/test/CodeGen/PowerPC/PR33671.ll b/test/CodeGen/PowerPC/PR33671.ll
new file mode 100644
index 0000000000000..0edd2e8daff4a
--- /dev/null
+++ b/test/CodeGen/PowerPC/PR33671.ll
@@ -0,0 +1,32 @@
+; Function Attrs: norecurse nounwind
+; RUN: llc -mtriple=powerpc64le-unknown-unknown -mcpu=pwr9 < %s | FileCheck %s
+define void @test1(i32* nocapture readonly %arr, i32* nocapture %arrTo) {
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %arrTo, i64 4
+ %0 = bitcast i32* %arrayidx to <4 x i32>*
+ %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 4
+ %1 = bitcast i32* %arrayidx1 to <4 x i32>*
+ %2 = load <4 x i32>, <4 x i32>* %1, align 16
+ store <4 x i32> %2, <4 x i32>* %0, align 16
+ ret void
+; CHECK-LABEL: test1
+; CHECK: lxv [[LD:[0-9]+]], 16(3)
+; CHECK: stxv [[LD]], 16(4)
+}
+
+; Function Attrs: norecurse nounwind
+define void @test2(i32* nocapture readonly %arr, i32* nocapture %arrTo) {
+entry:
+ %arrayidx = getelementptr inbounds i32, i32* %arrTo, i64 1
+ %0 = bitcast i32* %arrayidx to <4 x i32>*
+ %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 2
+ %1 = bitcast i32* %arrayidx1 to <4 x i32>*
+ %2 = load <4 x i32>, <4 x i32>* %1, align 16
+ store <4 x i32> %2, <4 x i32>* %0, align 16
+ ret void
+; CHECK-LABEL: test2
+; CHECK: addi 3, 3, 8
+; CHECK: lxvx [[LD:[0-9]+]], 0, 3
+; CHECK: addi 3, 4, 4
+; CHECK: stxvx [[LD]], 0, 3
+}
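+
+; The DQ-form lxv/stxv displacement must be a multiple of 16, so the
+; 4- and 8-byte offsets in test2 are expected to fall back to the
+; indexed lxvx/stxvx forms.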
diff --git a/test/CodeGen/PowerPC/build-vector-tests.ll b/test/CodeGen/PowerPC/build-vector-tests.ll
index 60bec4d18f12e..3ad432872c0e1 100644
--- a/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1018,13 +1018,13 @@ entry:
; P8BE-LABEL: fromDiffMemVarDi
; P8LE-LABEL: fromDiffMemVarDi
; P9BE: sldi {{r[0-9]+}}, r4, 2
-; P9BE-DAG: lxv {{v[0-9]+}}
-; P9BE-DAG: lxv
+; P9BE-DAG: lxvx {{v[0-9]+}}
+; P9BE-DAG: lxvx
; P9BE: vperm
; P9BE: blr
; P9LE: sldi {{r[0-9]+}}, r4, 2
-; P9LE-DAG: lxv {{v[0-9]+}}
-; P9LE-DAG: lxv
+; P9LE-DAG: lxvx {{v[0-9]+}}
+; P9LE-DAG: lxvx
; P9LE: vperm
; P9LE: blr
; P8BE: sldi {{r[0-9]+}}, r4, 2
@@ -1584,16 +1584,16 @@ entry:
; P9LE-LABEL: fromDiffMemConsAConvdtoi
; P8BE-LABEL: fromDiffMemConsAConvdtoi
; P8LE-LABEL: fromDiffMemConsAConvdtoi
-; P9BE: lxv [[REG1:[vs0-9]+]], 0(r3)
-; P9BE: lxv [[REG2:[vs0-9]+]], 16(r3)
+; P9BE-DAG: lxv [[REG1:[vs0-9]+]], 0(r3)
+; P9BE-DAG: lxv [[REG2:[vs0-9]+]], 16(r3)
; P9BE-DAG: xxmrgld [[REG3:[vs0-9]+]], [[REG1]], [[REG2]]
; P9BE-DAG: xxmrghd [[REG4:[vs0-9]+]], [[REG1]], [[REG2]]
; P9BE-DAG: xvcvdpsp [[REG5:[vs0-9]+]], [[REG3]]
; P9BE-DAG: xvcvdpsp [[REG6:[vs0-9]+]], [[REG4]]
; P9BE: vmrgew v2, [[REG6]], [[REG5]]
; P9BE: xvcvspsxws v2, v2
-; P9LE: lxv [[REG1:[vs0-9]+]], 0(r3)
-; P9LE: lxv [[REG2:[vs0-9]+]], 16(r3)
+; P9LE-DAG: lxv [[REG1:[vs0-9]+]], 0(r3)
+; P9LE-DAG: lxv [[REG2:[vs0-9]+]], 16(r3)
; P9LE-DAG: xxmrgld [[REG3:[vs0-9]+]], [[REG2]], [[REG1]]
; P9LE-DAG: xxmrghd [[REG4:[vs0-9]+]], [[REG2]], [[REG1]]
; P9LE-DAG: xvcvdpsp [[REG5:[vs0-9]+]], [[REG3]]
@@ -2177,12 +2177,14 @@ entry:
; P8BE-LABEL: fromDiffMemVarDui
; P8LE-LABEL: fromDiffMemVarDui
; P9BE-DAG: sldi {{r[0-9]+}}, r4, 2
-; P9BE-DAG: lxv {{v[0-9]+}}, -12(r3)
-; P9BE-DAG: lxv
+; P9BE-DAG: addi r3, r3, -12
+; P9BE-DAG: lxvx {{v[0-9]+}}, 0, r3
+; P9BE-DAG: lxvx
; P9BE: vperm
; P9BE: blr
; P9LE-DAG: sldi {{r[0-9]+}}, r4, 2
-; P9LE-DAG: lxv {{v[0-9]+}}, -12(r3)
+; P9LE-DAG: addi r3, r3, -12
+; P9LE-DAG: lxvx {{v[0-9]+}}, 0, r3
; P9LE-DAG: lxv
; P9LE: vperm
; P9LE: blr
@@ -2742,16 +2744,16 @@ entry:
; P9LE-LABEL: fromDiffMemConsAConvdtoui
; P8BE-LABEL: fromDiffMemConsAConvdtoui
; P8LE-LABEL: fromDiffMemConsAConvdtoui
-; P9BE: lxv [[REG1:[vs0-9]+]], 0(r3)
-; P9BE: lxv [[REG2:[vs0-9]+]], 16(r3)
+; P9BE-DAG: lxv [[REG1:[vs0-9]+]], 0(r3)
+; P9BE-DAG: lxv [[REG2:[vs0-9]+]], 16(r3)
; P9BE-DAG: xxmrgld [[REG3:[vs0-9]+]], [[REG1]], [[REG2]]
; P9BE-DAG: xxmrghd [[REG4:[vs0-9]+]], [[REG1]], [[REG2]]
; P9BE-DAG: xvcvdpsp [[REG5:[vs0-9]+]], [[REG3]]
; P9BE-DAG: xvcvdpsp [[REG6:[vs0-9]+]], [[REG4]]
; P9BE: vmrgew v2, [[REG6]], [[REG5]]
; P9BE: xvcvspuxws v2, v2
-; P9LE: lxv [[REG1:[vs0-9]+]], 0(r3)
-; P9LE: lxv [[REG2:[vs0-9]+]], 16(r3)
+; P9LE-DAG: lxv [[REG1:[vs0-9]+]], 0(r3)
+; P9LE-DAG: lxv [[REG2:[vs0-9]+]], 16(r3)
; P9LE-DAG: xxmrgld [[REG3:[vs0-9]+]], [[REG2]], [[REG1]]
; P9LE-DAG: xxmrghd [[REG4:[vs0-9]+]], [[REG2]], [[REG1]]
; P9LE-DAG: xvcvdpsp [[REG5:[vs0-9]+]], [[REG3]]
@@ -3466,9 +3468,9 @@ entry:
; P9LE-LABEL: fromDiffConstsConvftoll
; P8BE-LABEL: fromDiffConstsConvftoll
; P8LE-LABEL: fromDiffConstsConvftoll
-; P9BE: lxv v2
+; P9BE: lxvx v2
; P9BE: blr
-; P9LE: lxv v2
+; P9LE: lxvx v2
; P9LE: blr
; P8BE: lxvd2x v2
; P8BE: blr
@@ -4370,9 +4372,9 @@ entry:
; P9LE-LABEL: fromDiffConstsConvftoull
; P8BE-LABEL: fromDiffConstsConvftoull
; P8LE-LABEL: fromDiffConstsConvftoull
-; P9BE: lxv v2
+; P9BE: lxvx v2
; P9BE: blr
-; P9LE: lxv v2
+; P9LE: lxvx v2
; P9LE: blr
; P8BE: lxvd2x v2
; P8BE: blr
diff --git a/test/CodeGen/PowerPC/ppc64-i128-abi.ll b/test/CodeGen/PowerPC/ppc64-i128-abi.ll
index 90dd1d84fc23c..6d19d7f0d6292 100644
--- a/test/CodeGen/PowerPC/ppc64-i128-abi.ll
+++ b/test/CodeGen/PowerPC/ppc64-i128-abi.ll
@@ -63,7 +63,7 @@ define <1 x i128> @v1i128_increment_by_one(<1 x i128> %a) nounwind {
; FIXME: li [[R1:r[0-9]+]], 1
; FIXME: li [[R2:r[0-9]+]], 0
; FIXME: mtvsrdd [[V1:v[0-9]+]], [[R2]], [[R1]]
-; CHECK-P9: lxv [[V1:v[0-9]+]]
+; CHECK-P9: lxvx [[V1:v[0-9]+]]
; CHECK-P9: vadduqm v2, v2, [[V1]]
; CHECK-P9: blr
@@ -237,8 +237,8 @@ define <1 x i128> @call_v1i128_increment_by_val() nounwind {
; CHECK-LE: blr
; CHECK-P9-LABEL: @call_v1i128_increment_by_val
-; CHECK-P9-DAG: lxv v2
-; CHECK-P9-DAG: lxv v3
+; CHECK-P9-DAG: lxvx v2
+; CHECK-P9-DAG: lxvx v3
; CHECK-P9: bl v1i128_increment_by_val
; CHECK-P9: blr
diff --git a/test/CodeGen/PowerPC/swaps-le-6.ll b/test/CodeGen/PowerPC/swaps-le-6.ll
index e7640cab6aefa..d573441f2cc9c 100644
--- a/test/CodeGen/PowerPC/swaps-le-6.ll
+++ b/test/CodeGen/PowerPC/swaps-le-6.ll
@@ -33,11 +33,11 @@ entry:
; CHECK: stxvd2x [[REG5]]
; CHECK-P9-LABEL: @bar0
-; CHECK-P9-DAG: lxv [[REG1:[0-9]+]]
+; CHECK-P9-DAG: lxvx [[REG1:[0-9]+]]
; CHECK-P9-DAG: lfd [[REG2:[0-9]+]], 0(3)
; CHECK-P9: xxspltd [[REG4:[0-9]+]], [[REG2]], 0
; CHECK-P9: xxpermdi [[REG5:[0-9]+]], [[REG1]], [[REG4]], 1
-; CHECK-P9: stxv [[REG5]]
+; CHECK-P9: stxvx [[REG5]]
define void @bar1() {
entry:
@@ -56,9 +56,9 @@ entry:
; CHECK: stxvd2x [[REG5]]
; CHECK-P9-LABEL: @bar1
-; CHECK-P9-DAG: lxv [[REG1:[0-9]+]]
+; CHECK-P9-DAG: lxvx [[REG1:[0-9]+]]
; CHECK-P9-DAG: lfd [[REG2:[0-9]+]], 0(3)
; CHECK-P9: xxspltd [[REG4:[0-9]+]], [[REG2]], 0
; CHECK-P9: xxmrgld [[REG5:[0-9]+]], [[REG4]], [[REG1]]
-; CHECK-P9: stxv [[REG5]]
+; CHECK-P9: stxvx [[REG5]]
diff --git a/test/CodeGen/PowerPC/vsx-p9.ll b/test/CodeGen/PowerPC/vsx-p9.ll
index 0c29b6adad77f..1ca679f474c37 100644
--- a/test/CodeGen/PowerPC/vsx-p9.ll
+++ b/test/CodeGen/PowerPC/vsx-p9.ll
@@ -36,8 +36,8 @@ entry:
%1 = load <16 x i8>, <16 x i8>* @ucb, align 16
%add.i = add <16 x i8> %1, %0
tail call void (...) @sink(<16 x i8> %add.i)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vaddubm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -45,8 +45,8 @@ entry:
%3 = load <16 x i8>, <16 x i8>* @scb, align 16
%add.i22 = add <16 x i8> %3, %2
tail call void (...) @sink(<16 x i8> %add.i22)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vaddubm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -54,8 +54,8 @@ entry:
%5 = load <8 x i16>, <8 x i16>* @usb, align 16
%add.i21 = add <8 x i16> %5, %4
tail call void (...) @sink(<8 x i16> %add.i21)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vadduhm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -63,8 +63,8 @@ entry:
%7 = load <8 x i16>, <8 x i16>* @ssb, align 16
%add.i20 = add <8 x i16> %7, %6
tail call void (...) @sink(<8 x i16> %add.i20)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vadduhm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -72,8 +72,8 @@ entry:
%9 = load <4 x i32>, <4 x i32>* @uib, align 16
%add.i19 = add <4 x i32> %9, %8
tail call void (...) @sink(<4 x i32> %add.i19)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vadduwm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -81,8 +81,8 @@ entry:
%11 = load <4 x i32>, <4 x i32>* @sib, align 16
%add.i18 = add <4 x i32> %11, %10
tail call void (...) @sink(<4 x i32> %add.i18)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vadduwm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -90,8 +90,8 @@ entry:
%13 = load <2 x i64>, <2 x i64>* @ullb, align 16
%add.i17 = add <2 x i64> %13, %12
tail call void (...) @sink(<2 x i64> %add.i17)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vaddudm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -99,8 +99,8 @@ entry:
%15 = load <2 x i64>, <2 x i64>* @sllb, align 16
%add.i16 = add <2 x i64> %15, %14
tail call void (...) @sink(<2 x i64> %add.i16)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vaddudm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -108,8 +108,8 @@ entry:
%17 = load <1 x i128>, <1 x i128>* @uxb, align 16
%add.i15 = add <1 x i128> %17, %16
tail call void (...) @sink(<1 x i128> %add.i15)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vadduqm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -117,8 +117,8 @@ entry:
%19 = load <1 x i128>, <1 x i128>* @sxb, align 16
%add.i14 = add <1 x i128> %19, %18
tail call void (...) @sink(<1 x i128> %add.i14)
-; CHECK: lxv 34, 0(3)
-; CHECK: lxv 35, 0(4)
+; CHECK: lxvx 34, 0, 3
+; CHECK: lxvx 35, 0, 4
; CHECK: vadduqm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
@@ -126,8 +126,8 @@ entry:
%21 = load <4 x float>, <4 x float>* @vfb, align 16
%add.i13 = fadd <4 x float> %20, %21
tail call void (...) @sink(<4 x float> %add.i13)
-; CHECK: lxv 0, 0(3)
-; CHECK: lxv 1, 0(4)
+; CHECK: lxvx 0, 0, 3
+; CHECK: lxvx 1, 0, 4
; CHECK: xvaddsp 34, 0, 1
; CHECK: stxv 34,
; CHECK: bl sink
@@ -135,8 +135,8 @@ entry:
%23 = load <2 x double>, <2 x double>* @vdb, align 16
%add.i12 = fadd <2 x double> %22, %23
tail call void (...) @sink(<2 x double> %add.i12)
-; CHECK: lxv 0, 0(3)
-; CHECK: lxv 1, 0(4)
+; CHECK: lxvx 0, 0, 3
+; CHECK: lxvx 1, 0, 4
; CHECK: xvadddp 0, 0, 1
; CHECK: stxv 0,
; CHECK: bl sink
diff --git a/test/CodeGen/SPARC/soft-mul-div.ll b/test/CodeGen/SPARC/soft-mul-div.ll
new file mode 100644
index 0000000000000..7c453dd35be7b
--- /dev/null
+++ b/test/CodeGen/SPARC/soft-mul-div.ll
@@ -0,0 +1,65 @@
+; RUN: llc -march=sparc -mcpu=v7 -O0 < %s | FileCheck %s
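+;
+; V7 lacks hardware integer multiply/divide, so these operations are
+; expected to lower to calls to the .umul/.div/.udiv software routines.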
+
+define i32 @test_mul32(i32 %a, i32 %b) #0 {
+ ; CHECK-LABEL: test_mul32
+ ; CHECK: call .umul
+ %m = mul i32 %a, %b
+ ret i32 %m
+}
+
+define i16 @test_mul16(i16 %a, i16 %b) #0 {
+ ; CHECK-LABEL: test_mul16
+ ; CHECK: call .umul
+ %m = mul i16 %a, %b
+ ret i16 %m
+}
+
+define i8 @test_mul8(i8 %a, i8 %b) #0 {
+ ; CHECK-LABEL: test_mul8
+ ; CHECK: call .umul
+ %m = mul i8 %a, %b
+ ret i8 %m
+}
+
+define i32 @test_sdiv32(i32 %a, i32 %b) #0 {
+ ; CHECK-LABEL: test_sdiv32
+ ; CHECK: call .div
+ %d = sdiv i32 %a, %b
+ ret i32 %d
+}
+
+define i16 @test_sdiv16(i16 %a, i16 %b) #0 {
+ ; CHECK-LABEL: test_sdiv16
+ ; CHECK: call .div
+ %d = sdiv i16 %a, %b
+ ret i16 %d
+}
+
+define i8 @test_sdiv8(i8 %a, i8 %b) #0 {
+ ; CHECK-LABEL: test_sdiv8
+ ; CHECK: call .div
+ %d = sdiv i8 %a, %b
+ ret i8 %d
+}
+
+define i32 @test_udiv32(i32 %a, i32 %b) #0 {
+ ; CHECK-LABEL: test_udiv32
+ ; CHECK: call .udiv
+ %d = udiv i32 %a, %b
+ ret i32 %d
+}
+
+define i16 @test_udiv16(i16 %a, i16 %b) #0 {
+ ; CHECK-LABEL: test_udiv16
+ ; CHECK: call .udiv
+ %d = udiv i16 %a, %b
+ ret i16 %d
+}
+
+define i8 @test_udiv8(i8 %a, i8 %b) #0 {
+ ; CHECK-LABEL: test_udiv8
+ ; CHECK: call .udiv
+ %d = udiv i8 %a, %b
+ ret i8 %d
+}
+
diff --git a/test/CodeGen/SystemZ/branch-11.ll b/test/CodeGen/SystemZ/branch-11.ll
new file mode 100644
index 0000000000000..ce7b3ef267b49
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-11.ll
@@ -0,0 +1,56 @@
+; Test indirect jumps on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define i32 @f1(i32 %x, i32 %y, i32 %op) {
+; CHECK-LABEL: f1:
+; CHECK: ahi %r4, -1
+; CHECK: clibh %r4, 5, 0(%r14)
+; CHECK: llgfr [[OP64:%r[0-5]]], %r4
+; CHECK: sllg [[INDEX:%r[1-5]]], [[OP64]], 3
+; CHECK: larl [[BASE:%r[1-5]]]
+; CHECK: bi 0([[BASE]],[[INDEX]])
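+;
+; (clibh guards the jump-table range, returning for out-of-range selectors,
+; before bi performs the indexed indirect branch.)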
+entry:
+ switch i32 %op, label %exit [
+ i32 1, label %b.add
+ i32 2, label %b.sub
+ i32 3, label %b.and
+ i32 4, label %b.or
+ i32 5, label %b.xor
+ i32 6, label %b.mul
+ ]
+
+b.add:
+ %add = add i32 %x, %y
+ br label %exit
+
+b.sub:
+ %sub = sub i32 %x, %y
+ br label %exit
+
+b.and:
+ %and = and i32 %x, %y
+ br label %exit
+
+b.or:
+ %or = or i32 %x, %y
+ br label %exit
+
+b.xor:
+ %xor = xor i32 %x, %y
+ br label %exit
+
+b.mul:
+ %mul = mul i32 %x, %y
+ br label %exit
+
+exit:
+ %res = phi i32 [ %x, %entry ],
+ [ %add, %b.add ],
+ [ %sub, %b.sub ],
+ [ %and, %b.and ],
+ [ %or, %b.or ],
+ [ %xor, %b.xor ],
+ [ %mul, %b.mul ]
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-abs-03.ll b/test/CodeGen/SystemZ/fp-abs-03.ll
new file mode 100644
index 0000000000000..cab6c116bc08c
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-abs-03.ll
@@ -0,0 +1,43 @@
+; Test floating-point absolute on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f32.
+declare float @llvm.fabs.f32(float %f)
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: lpdfr %f0, %f0
+; CHECK: br %r14
+ %res = call float @llvm.fabs.f32(float %f)
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.fabs.f64(double %f)
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: lpdfr %f0, %f0
+; CHECK: br %r14
+ %res = call double @llvm.fabs.f64(double %f)
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure absolute would probably
+; be better implemented using an NI on the upper byte. Do some extra
+; processing so that using FPRs is unequivocally better.
+declare fp128 @llvm.fabs.f128(fp128 %f)
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: wflpxb [[POSREG1:%v[0-9]+]], [[REG1]]
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[POSREG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %orig = load fp128 , fp128 *%ptr
+ %abs = call fp128 @llvm.fabs.f128(fp128 %orig)
+ %op2 = load fp128 , fp128 *%ptr2
+ %res = fdiv fp128 %abs, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-abs-04.ll b/test/CodeGen/SystemZ/fp-abs-04.ll
new file mode 100644
index 0000000000000..606bce3de36e2
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-abs-04.ll
@@ -0,0 +1,46 @@
+; Test negated floating-point absolute on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f32.
+declare float @llvm.fabs.f32(float %f)
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: lndfr %f0, %f0
+; CHECK: br %r14
+ %abs = call float @llvm.fabs.f32(float %f)
+ %res = fsub float -0.0, %abs
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.fabs.f64(double %f)
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: lndfr %f0, %f0
+; CHECK: br %r14
+ %abs = call double @llvm.fabs.f64(double %f)
+ %res = fsub double -0.0, %abs
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure negative-absolute would
+; probably be better implemented using an OI on the upper byte. Do some
+; extra processing so that using FPRs is unequivocally better.
+declare fp128 @llvm.fabs.f128(fp128 %f)
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: wflnxb [[NEGREG1:%v[0-9]+]], [[REG1]]
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[NEGREG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %orig = load fp128 , fp128 *%ptr
+ %abs = call fp128 @llvm.fabs.f128(fp128 %orig)
+ %negabs = fsub fp128 0xL00000000000000008000000000000000, %abs
+ %op2 = load fp128 , fp128 *%ptr2
+ %res = fdiv fp128 %negabs, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-add-01.ll b/test/CodeGen/SystemZ/fp-add-01.ll
index 5b0ed0513a377..219607d628d7f 100644
--- a/test/CodeGen/SystemZ/fp-add-01.ll
+++ b/test/CodeGen/SystemZ/fp-add-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit floating-point addition.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: aeb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: aeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-add-04.ll b/test/CodeGen/SystemZ/fp-add-04.ll
new file mode 100644
index 0000000000000..186f37ca51821
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-add-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point addition on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfaxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fadd fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
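
For context on this and the sibling fp-sub-04.ll, fp-mul-11.ll, fp-div-04.ll and fp-sqrt-04.ll tests: the z14 vector facility extensions add full 128-bit BFP arithmetic on a single vector register (wfaxb, wfsxb, wfmxb, wfdxb, wfsqxb, ...), where earlier CPUs kept an fp128 in a pair of 64-bit FPRs. Roughly, the same IR on z10 lowers along these lines (a sketch of the older FPR-pair code, not something this patch checks):

; ld   %f0, 0(%r2)      ; high half -- %f0/%f2 form an FPR pair
; ld   %f2, 8(%r2)      ; low half
; ld   %f1, 0(%r3)
; ld   %f3, 8(%r3)
; axbr %f0, %f1         ; 128-bit add of pair (f1,f3) into pair (f0,f2)
; std  %f0, 0(%r2)
; std  %f2, 8(%r2)
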
diff --git a/test/CodeGen/SystemZ/fp-cmp-01.ll b/test/CodeGen/SystemZ/fp-cmp-01.ll
index 075c7aa3dd843..146b16bc695fa 100644
--- a/test/CodeGen/SystemZ/fp-cmp-01.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-01.ll
@@ -1,7 +1,10 @@
; Test 32-bit floating-point comparison. The tests assume a z10 implementation
; of select, using conditional branches rather than LOCGR.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
declare float @foo()
@@ -9,8 +12,9 @@ declare float @foo()
define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
; CHECK-LABEL: f1:
; CHECK: cebr %f0, %f2
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%cond = fcmp oeq float %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
@@ -21,8 +25,9 @@ define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
define i64 @f2(i64 %a, i64 %b, float %f1, float *%ptr) {
; CHECK-LABEL: f2:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%f2 = load float , float *%ptr
%cond = fcmp oeq float %f1, %f2
@@ -34,8 +39,9 @@ define i64 @f2(i64 %a, i64 %b, float %f1, float *%ptr) {
define i64 @f3(i64 %a, i64 %b, float %f1, float *%base) {
; CHECK-LABEL: f3:
; CHECK: ceb %f0, 4092(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float , float *%ptr
@@ -50,8 +56,9 @@ define i64 @f4(i64 %a, i64 %b, float %f1, float *%base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r4, 4096
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float , float *%ptr
@@ -65,8 +72,9 @@ define i64 @f5(i64 %a, i64 %b, float %f1, float *%base) {
; CHECK-LABEL: f5:
; CHECK: aghi %r4, -4
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float , float *%ptr
@@ -80,8 +88,9 @@ define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r5, 2
; CHECK: ceb %f0, 400(%r1,%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr1 = getelementptr float, float *%base, i64 %index
%ptr2 = getelementptr float, float *%ptr1, i64 100
@@ -95,7 +104,7 @@ define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK-SCALAR: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
@@ -153,8 +162,9 @@ define float @f7(float *%ptr0) {
define i64 @f8(i64 %a, i64 %b, float %f) {
; CHECK-LABEL: f8:
; CHECK: ltebr %f0, %f0
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%cond = fcmp oeq float %f, 0.0
%res = select i1 %cond, i64 %a, i64 %b
@@ -166,8 +176,9 @@ define i64 @f8(i64 %a, i64 %b, float %f) {
define i64 @f9(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f9:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp oeq float %f1, %f2
@@ -179,8 +190,9 @@ define i64 @f9(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f10(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f10:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: blhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: blhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnlh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp one float %f1, %f2
@@ -192,8 +204,9 @@ define i64 @f10(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f11(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f11:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp olt float %f1, %f2
@@ -205,8 +218,9 @@ define i64 @f11(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f12(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f12:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bher %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bher %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnhe %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ole float %f1, %f2
@@ -218,8 +232,9 @@ define i64 @f12(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f13(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f13:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bler %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bler %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnle %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp oge float %f1, %f2
@@ -231,8 +246,9 @@ define i64 @f13(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f14(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f14:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: blr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: blr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnl %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ogt float %f1, %f2
@@ -244,8 +260,9 @@ define i64 @f14(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f15(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f15:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnlhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnlhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrlh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ueq float %f1, %f2
@@ -257,8 +274,9 @@ define i64 @f15(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f16(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f16:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bner %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bner %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgre %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp une float %f1, %f2
@@ -270,8 +288,9 @@ define i64 @f16(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f17(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f17:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnler %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnler %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrle %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ult float %f1, %f2
@@ -283,8 +302,9 @@ define i64 @f17(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f18(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f18:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnlr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnlr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrl %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ule float %f1, %f2
@@ -296,8 +316,9 @@ define i64 @f18(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f19(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f19:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp uge float %f1, %f2
@@ -309,8 +330,9 @@ define i64 @f19(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f20(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f20:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnher %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnher %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrhe %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ugt float %f1, %f2
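
Read straight off the CHECK lines, the predicate mapping this file now exercises is worth tabulating once. The z14 column is always the inverse condition, because LOCGR overwrites %r2 with %b when the select condition fails; for f9 onward the loaded value is the first fcmp operand while the machine compares register against memory, so the branch mnemonics reflect the swapped operand order:

;   fcmp   z10 branch   z14 load-on-condition
;   oeq    ber          locgrne
;   one    blhr         locgrnlh
;   olt    bhr          locgrnh
;   ole    bher         locgrnhe
;   oge    bler         locgrnle
;   ogt    blr          locgrnl
;   ueq    bnlhr        locgrlh
;   ult    bnler        locgrle
;   ule    bnlr         locgrl
;   uge    bnhr         locgrh
;   ugt    bnher        locgrhe
;   une    bner         locgre
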
diff --git a/test/CodeGen/SystemZ/fp-cmp-06.ll b/test/CodeGen/SystemZ/fp-cmp-06.ll
new file mode 100644
index 0000000000000..e146b51e4fb27
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-cmp-06.ll
@@ -0,0 +1,33 @@
+; Test f128 comparisons on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; There is no memory form of 128-bit comparison.
+define i64 @f1(i64 %a, i64 %b, fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r4)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r5)
+; CHECK: wfcxb [[REG1]], [[REG2]]
+; CHECK-NEXT: locgrne %r2, %r3
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %cond = fcmp oeq fp128 %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check comparison with zero -- it is not worthwhile to copy to
+; FP pairs just so we can use LTXBR, so simply load up a zero.
+define i64 @f2(i64 %a, i64 %b, fp128 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r4)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfcxb [[REG1]], [[REG2]]
+; CHECK-NEXT: locgrne %r2, %r3
+; CHECK: br %r14
+ %f = load fp128, fp128 *%ptr
+ %cond = fcmp oeq fp128 %f, 0xL00000000000000000000000000000000
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-const-11.ll b/test/CodeGen/SystemZ/fp-const-11.ll
new file mode 100644
index 0000000000000..8523f2786c34c
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-11.ll
@@ -0,0 +1,40 @@
+; Test loads of f128 floating-point constants on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s -check-prefix=CONST
+
+; Test loading zero.
+define void @f1(fp128 *%x) {
+; CHECK-LABEL: f1:
+; CHECK: vzero [[REG:%v[0-9]+]]
+; CHECK: vst [[REG]], 0(%r2)
+; CHECK: br %r14
+ store fp128 0xL00000000000000000000000000000000, fp128 *%x
+ ret void
+}
+
+; Test loading of negative floating-point zero.
+define void @f2(fp128 *%x) {
+; CHECK-LABEL: f2:
+; CHECK: vzero [[REG:%v[0-9]+]]
+; CHECK: wflnxb [[REG]], [[REG]]
+; CHECK: vst [[REG]], 0(%r2)
+; CHECK: br %r14
+ store fp128 0xL00000000000000008000000000000000, fp128 *%x
+ ret void
+}
+
+; Test loading of a 128-bit floating-point constant. This value would
+; actually fit within the 32-bit format, but we don't have extending
+; loads into vector registers.
+define void @f3(fp128 *%x) {
+; CHECK-LABEL: f3:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: vl [[REG:%v[0-9]+]], 0([[REGISTER]])
+; CHECK: vst [[REG]], 0(%r2)
+; CHECK: br %r14
+; CONST: .quad 4611404543484231680
+; CONST: .quad 0
+ store fp128 0xL00000000000000003fff000002000000, fp128 *%x
+ ret void
+}
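
To connect f3's IR literal to the CONST checks: the 0xL form stores the low quadword first, so the high quadword is 0x3fff000002000000 -- sign 0, biased exponent 0x3fff (exactly the IEEE-quad bias, i.e. exponent 0), and a single mantissa bit at 2^25 within the 48 mantissa bits held in the high word. That gives

  value = 1 + 2^25 / 2^48 = 1 + 2^-23

the smallest single-precision step above 1.0 (hence the comment that it would fit within the 32-bit format), and in decimal 0x3fff000002000000 = 4611404543484231680, matching the first .quad; the low quadword is 0.
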
diff --git a/test/CodeGen/SystemZ/fp-conv-15.ll b/test/CodeGen/SystemZ/fp-conv-15.ll
new file mode 100644
index 0000000000000..61100016c4263
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-15.ll
@@ -0,0 +1,50 @@
+; Test f128 floating-point truncations/extensions on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f128->f64.
+define double @f1(fp128 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wflrx %f0, [[REG]], 0, 0
+; CHECK: br %r14
+ %val = load fp128, fp128 *%ptr
+ %res = fptrunc fp128 %val to double
+ ret double %res
+}
+
+; Test f128->f32.
+define float @f2(fp128 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wflrx %f0, [[REG]], 0, 3
+; CHECK: ledbra %f0, 0, %f0, 0
+; CHECK: br %r14
+ %val = load fp128, fp128 *%ptr
+ %res = fptrunc fp128 %val to float
+ ret float %res
+}
+
+; Test f64->f128.
+define void @f3(fp128 *%dst, double %val) {
+; CHECK-LABEL: f3:
+; CHECK: wflld [[RES:%v[0-9]+]], %f0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Test f32->f128.
+define void @f4(fp128 *%dst, float %val) {
+; CHECK-LABEL: f4:
+; CHECK: ldebr %f0, %f0
+; CHECK: wflld [[RES:%v[0-9]+]], %f0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
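
The asymmetry between f1 and f2 is deliberate. Narrowing f128 to f64 needs one rounding, so f1 rounds with the current mode (the trailing 0, 0 on wflrx). Narrowing f128 to f32 in two machine steps risks double rounding, so f2 first narrows with rounding method 3 -- per the usual z/Architecture rounding-method encoding (background knowledge, not asserted by the test), "round to prepare for shorter precision", which preserves sticky information in the intermediate f64 -- and lets the final ledbra perform the one real rounding. A worked example of the hazard the method-3 step avoids:

  x = 1 + 2^-24 + 2^-60              (exact in f128)
  round x to f64 (nearest):  1 + 2^-24        (the 2^-60 is lost)
  round that to f32:         1                (exact tie, goes to even)
  round x to f32 directly:   1 + 2^-23        (x is above the midpoint)
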
diff --git a/test/CodeGen/SystemZ/fp-conv-16.ll b/test/CodeGen/SystemZ/fp-conv-16.ll
new file mode 100644
index 0000000000000..4f9bb865694ba
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-16.ll
@@ -0,0 +1,99 @@
+; Test f128 floating-point conversion to/from integers on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test signed i32->f128.
+define void @f1(i32 %i, fp128 *%dst) {
+; CHECK-LABEL: f1:
+; CHECK: cxfbr %f0, %r2
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = sitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test signed i64->f128.
+define void @f2(i64 %i, fp128 *%dst) {
+; CHECK-LABEL: f2:
+; CHECK: cxgbr %f0, %r2
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = sitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test unsigned i32->f128.
+define void @f3(i32 %i, fp128 *%dst) {
+; CHECK-LABEL: f3:
+; CHECK: cxlfbr %f0, 0, %r2, 0
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = uitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test unsigned i64->f128.
+define void @f4(i64 %i, fp128 *%dst) {
+; CHECK-LABEL: f4:
+; CHECK: cxlgbr %f0, 0, %r2, 0
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = uitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test signed f128->i32.
+define i32 @f5(fp128 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: cfxbr %r2, 5, %f0
+; CHECK: br %r14
+ %f = load fp128, fp128 *%src
+ %conv = fptosi fp128 %f to i32
+ ret i32 %conv
+}
+
+; Test signed f128->i64.
+define i64 @f6(fp128 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: cgxbr %r2, 5, %f0
+; CHECK: br %r14
+ %f = load fp128, fp128 *%src
+ %conv = fptosi fp128 %f to i64
+ ret i64 %conv
+}
+
+; Test unsigned f128->i32.
+define i32 @f7(fp128 *%src) {
+; CHECK-LABEL: f7:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: clfxbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %f = load fp128 , fp128 *%src
+ %conv = fptoui fp128 %f to i32
+ ret i32 %conv
+}
+
+; Test unsigned f128->i64.
+define i64 @f8(fp128 *%src) {
+; CHECK-LABEL: f8:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: clgxbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %f = load fp128 , fp128 *%src
+ %conv = fptoui fp128 %f to i64
+ ret i64 %conv
+}
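
A note on the recurring vmrhg/vrepg shuffles: z14 does f128 arithmetic in one vector register, but the integer conversion instructions checked here (cxfbr, cxgbr, cxlfbr, cxlgbr, cfxbr, cgxbr, clfxbr, clgxbr) still take a 64-bit FPR pair, and the FPRs of a pair alias the high doublewords of the corresponding vector registers -- a z/Architecture register-overlay detail, not something the patch itself states. Schematically:

;   FPR pair (f0,f2)  <->  high doublewords of (%v0,%v2)
;   vmrhg %v0, %v0, %v2    =>  %v0 = [ hi(%v0) | hi(%v2) ]  -- pack f128
;   vrepg %v2, %v0, 1      =>  %v2's high doubleword = low half of %v0
;                              so the convert can read the (f0,f2) pair
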
diff --git a/test/CodeGen/SystemZ/fp-copysign-02.ll b/test/CodeGen/SystemZ/fp-copysign-02.ll
new file mode 100644
index 0000000000000..657c0e18767b6
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-copysign-02.ll
@@ -0,0 +1,81 @@
+; Test f128 copysign operations on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @copysignf(float, float) readnone
+declare double @copysign(double, double) readnone
+; FIXME: not really the correct prototype for SystemZ.
+declare fp128 @copysignl(fp128, fp128) readnone
+
+; Test f32 copies in which the sign comes from an f128.
+define float @f1(float %a, fp128 *%bptr) {
+; CHECK-LABEL: f1:
+; CHECK: vl %v[[REG:[0-9]+]], 0(%r2)
+; CHECK: cpsdr %f0, %f[[REG]], %f0
+; CHECK: br %r14
+ %bl = load volatile fp128, fp128 *%bptr
+ %b = fptrunc fp128 %bl to float
+ %res = call float @copysignf(float %a, float %b) readnone
+ ret float %res
+}
+
+; Test f64 copies in which the sign comes from an f128.
+define double @f2(double %a, fp128 *%bptr) {
+; CHECK-LABEL: f2:
+; CHECK: vl %v[[REG:[0-9]+]], 0(%r2)
+; CHECK: cpsdr %f0, %f[[REG]], %f0
+; CHECK: br %r14
+ %bl = load volatile fp128, fp128 *%bptr
+ %b = fptrunc fp128 %bl to double
+ %res = call double @copysign(double %a, double %b) readnone
+ ret double %res
+}
+
+; Test f128 copies in which the sign comes from an f32.
+define void @f7(fp128 *%cptr, fp128 *%aptr, float %bf) {
+; CHECK-LABEL: f7:
+; CHECK: vl [[REG1:%v[0-7]+]], 0(%r3)
+; CHECK: tmlh
+; CHECK: wflnxb [[REG1]], [[REG1]]
+; CHECK: wflpxb [[REG1]], [[REG1]]
+; CHECK: vst [[REG1]], 0(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128, fp128 *%aptr
+ %b = fpext float %bf to fp128
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
+
+; As above, but the sign comes from an f64.
+define void @f8(fp128 *%cptr, fp128 *%aptr, double %bd) {
+; CHECK-LABEL: f8:
+; CHECK: vl [[REG1:%v[0-7]+]], 0(%r3)
+; CHECK: tmhh
+; CHECK: wflnxb [[REG1]], [[REG1]]
+; CHECK: wflpxb [[REG1]], [[REG1]]
+; CHECK: vst [[REG1]], 0(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128, fp128 *%aptr
+ %b = fpext double %bd to fp128
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
+
+; As above, but the sign comes from an f128.
+define void @f9(fp128 *%cptr, fp128 *%aptr, fp128 *%bptr) {
+; CHECK-LABEL: f9:
+; CHECK: vl [[REG1:%v[0-7]+]], 0(%r3)
+; CHECK: vl [[REG2:%v[0-7]+]], 0(%r4)
+; CHECK: tm
+; CHECK: wflnxb [[REG1]], [[REG1]]
+; CHECK: wflpxb [[REG1]], [[REG1]]
+; CHECK: vst [[REG1]], 0(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128, fp128 *%aptr
+ %b = load volatile fp128, fp128 *%bptr
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
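
The f7-f9 lowering deserves a decode: copysign with an fp128 destination becomes "test the sign of %b, then give |a| the right sign" -- a TEST UNDER MASK on the stored sign source (tmlh, tmhh or tm depending on whether the sign comes from f32, f64 or f128) selecting between wflnxb (negative absolute) and wflpxb (absolute) applied to the magnitude. A hedged control-flow sketch; only the mnemonics above are checked, the branch layout is an assumption:

;   vl     %v0, 0(%r3)       ; magnitude source a
;   tm...                    ; test the sign bit of b
;   je     .Lpos             ; (assumed branch shape)
;   wflnxb %v0, %v0          ; -|a|
;   j      .Lstore
; .Lpos:
;   wflpxb %v0, %v0          ; +|a|
; .Lstore:
;   vst    %v0, 0(%r2)
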
diff --git a/test/CodeGen/SystemZ/fp-div-01.ll b/test/CodeGen/SystemZ/fp-div-01.ll
index 0791e8db93f8e..ee514dc474e95 100644
--- a/test/CodeGen/SystemZ/fp-div-01.ll
+++ b/test/CodeGen/SystemZ/fp-div-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit floating-point division.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: deb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: deb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-div-04.ll b/test/CodeGen/SystemZ/fp-div-04.ll
new file mode 100644
index 0000000000000..54e87f46c84a8
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-div-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point division on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fdiv fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-move-13.ll b/test/CodeGen/SystemZ/fp-move-13.ll
new file mode 100644
index 0000000000000..d6c53eaceeef0
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-13.ll
@@ -0,0 +1,46 @@
+; Test f128 moves on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; VR-to-VR moves. Since f128s are passed by reference,
+; we need to force a copy by other means.
+define void @f1(fp128 *%x) {
+; CHECK-LABEL: f1:
+; CHECK: vlr
+; CHECK: vleig
+; CHECK: br %r14
+ %val = load volatile fp128 , fp128 *%x
+ %t1 = bitcast fp128 %val to <2 x i64>
+ %t2 = insertelement <2 x i64> %t1, i64 0, i32 0
+ %res = bitcast <2 x i64> %t2 to fp128
+ store volatile fp128 %res, fp128 *%x
+ store volatile fp128 %val, fp128 *%x
+ ret void
+}
+
+; Test 128-bit moves from GPRs to VRs. i128 isn't a legitimate type,
+; so this goes through memory.
+define void @f2(fp128 *%a, i128 *%b) {
+; CHECK-LABEL: f2:
+; CHECK: lg
+; CHECK: lg
+; CHECK: stg
+; CHECK: stg
+; CHECK: br %r14
+ %val = load i128 , i128 *%b
+ %res = bitcast i128 %val to fp128
+ store fp128 %res, fp128 *%a
+ ret void
+}
+
+; Test 128-bit moves from VRs to GPRs, with the same restriction as f2.
+define void @f3(fp128 *%a, i128 *%b) {
+; CHECK-LABEL: f3:
+; CHECK: vl
+; CHECK: vst
+ %val = load fp128 , fp128 *%a
+ %res = bitcast fp128 %val to i128
+ store i128 %res, i128 *%b
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-mul-01.ll b/test/CodeGen/SystemZ/fp-mul-01.ll
index 3b72d25e0b5c4..126567b218abb 100644
--- a/test/CodeGen/SystemZ/fp-mul-01.ll
+++ b/test/CodeGen/SystemZ/fp-mul-01.ll
@@ -1,6 +1,8 @@
; Test multiplication of two f32s, producing an f32 result.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: meeb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: meeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-mul-06.ll b/test/CodeGen/SystemZ/fp-mul-06.ll
index 896fafecbdaf4..581e44eeaa2ff 100644
--- a/test/CodeGen/SystemZ/fp-mul-06.ll
+++ b/test/CodeGen/SystemZ/fp-mul-06.ll
@@ -1,11 +1,15 @@
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
define float @f1(float %f1, float %f2, float %acc) {
; CHECK-LABEL: f1:
-; CHECK: maebr %f4, %f0, %f2
-; CHECK: ler %f0, %f4
+; CHECK-SCALAR: maebr %f4, %f0, %f2
+; CHECK-SCALAR: ler %f0, %f4
+; CHECK-VECTOR: wfmasb %f0, %f0, %f2, %f4
; CHECK: br %r14
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
@@ -14,7 +18,8 @@ define float @f1(float %f1, float %f2, float %acc) {
define float @f2(float %f1, float *%ptr, float %acc) {
; CHECK-LABEL: f2:
; CHECK: maeb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%f2 = load float , float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
@@ -24,7 +29,8 @@ define float @f2(float %f1, float *%ptr, float %acc) {
define float @f3(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f3:
; CHECK: maeb %f2, %f0, 4092(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float , float *%ptr
@@ -39,7 +45,8 @@ define float @f4(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: maeb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float , float *%ptr
@@ -54,7 +61,8 @@ define float @f5(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: maeb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float , float *%ptr
@@ -66,7 +74,8 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 0(%r1,%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 %index
%f2 = load float , float *%ptr
@@ -78,7 +87,8 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1023
%ptr = getelementptr float, float *%base, i64 %index2
@@ -92,7 +102,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
; CHECK: maeb %f2, %f0, 0(%r1)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1024
%ptr = getelementptr float, float *%base, i64 %index2
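
What the new prefixes in this file separate: on z14 the register-register FMA is the vector-enhancements form wfmasb %f0, %f0, %f2, %f4, computing %f0 = %f0 * %f2 + %f4 with a distinct result operand, so the old two-instruction maebr + ler sequence collapses into one. The memory form maeb still accumulates into %f2 in place; the trailing copy is now ldr rather than ler -- a full 64-bit FPR move is equally correct for a float held in bits 0-31, and (as general backend behaviour, not asserted here) the vector-capable subtargets appear to prefer it.
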
diff --git a/test/CodeGen/SystemZ/fp-mul-08.ll b/test/CodeGen/SystemZ/fp-mul-08.ll
index 5e5538bfacc93..5b1f9b96c089e 100644
--- a/test/CodeGen/SystemZ/fp-mul-08.ll
+++ b/test/CodeGen/SystemZ/fp-mul-08.ll
@@ -1,11 +1,15 @@
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
define float @f1(float %f1, float %f2, float %acc) {
; CHECK-LABEL: f1:
-; CHECK: msebr %f4, %f0, %f2
-; CHECK: ler %f0, %f4
+; CHECK-SCALAR: msebr %f4, %f0, %f2
+; CHECK-SCALAR: ler %f0, %f4
+; CHECK-VECTOR: wfmssb %f0, %f0, %f2, %f4
; CHECK: br %r14
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
@@ -15,7 +19,8 @@ define float @f1(float %f1, float %f2, float %acc) {
define float @f2(float %f1, float *%ptr, float %acc) {
; CHECK-LABEL: f2:
; CHECK: mseb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%f2 = load float , float *%ptr
%negacc = fsub float -0.0, %acc
@@ -26,7 +31,8 @@ define float @f2(float %f1, float *%ptr, float %acc) {
define float @f3(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f3:
; CHECK: mseb %f2, %f0, 4092(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float , float *%ptr
@@ -42,7 +48,8 @@ define float @f4(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mseb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float , float *%ptr
@@ -58,7 +65,8 @@ define float @f5(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: mseb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float , float *%ptr
@@ -71,7 +79,8 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 0(%r1,%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 %index
%f2 = load float , float *%ptr
@@ -84,7 +93,8 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1023
%ptr = getelementptr float, float *%base, i64 %index2
@@ -99,7 +109,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
; CHECK: mseb %f2, %f0, 0(%r1)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1024
%ptr = getelementptr float, float *%base, i64 %index2
diff --git a/test/CodeGen/SystemZ/fp-mul-10.ll b/test/CodeGen/SystemZ/fp-mul-10.ll
new file mode 100644
index 0000000000000..c23a6a202ad5f
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-10.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare double @llvm.fma.f64(double %f1, double %f2, double %f3)
+declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
+
+define double @f1(double %f1, double %f2, double %acc) {
+; CHECK-LABEL: f1:
+; CHECK: wfnmadb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ %negres = fsub double -0.0, %res
+ ret double %negres
+}
+
+define double @f2(double %f1, double %f2, double %acc) {
+; CHECK-LABEL: f2:
+; CHECK: wfnmsdb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ %negres = fsub double -0.0, %res
+ ret double %negres
+}
+
+define float @f3(float %f1, float %f2, float %acc) {
+; CHECK-LABEL: f3:
+; CHECK: wfnmasb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ %negres = fsub float -0.0, %res
+ ret float %negres
+}
+
+define float @f4(float %f1, float %f2, float %acc) {
+; CHECK-LABEL: f4:
+; CHECK: wfnmssb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ %negres = fsub float -0.0, %res
+ ret float %negres
+}
+
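
The four functions are exactly the two negated-FMA identities, once per type; writing them out makes each CHECK line mechanical:

;   wfnmadb / wfnmasb : res = -(f1 * f2 + acc)      (f1, f3)
;   wfnmsdb / wfnmssb : res = -(f1 * f2 - acc)      (f2, f4)
;
; e.g. in f2: negacc = -acc, so
;   -res = f1 * f2 + negacc = f1 * f2 - acc
;   res  = -(f1 * f2 - acc) = acc - f1 * f2
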
diff --git a/test/CodeGen/SystemZ/fp-mul-11.ll b/test/CodeGen/SystemZ/fp-mul-11.ll
new file mode 100644
index 0000000000000..ef45bf184a4c2
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-11.ll
@@ -0,0 +1,32 @@
+; Test 128-bit floating-point multiplication on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfmxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fmul fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
+
+define void @f2(double %f1, double %f2, fp128 *%dst) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: wflld [[REG1:%v[0-9]+]], %f0
+; CHECK-DAG: wflld [[REG2:%v[0-9]+]], %f2
+; CHECK: wfmxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-mul-12.ll b/test/CodeGen/SystemZ/fp-mul-12.ll
new file mode 100644
index 0000000000000..331f9a30c2741
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-12.ll
@@ -0,0 +1,72 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare fp128 @llvm.fma.f128(fp128 %f1, fp128 %f2, fp128 %f3)
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfmaxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %f3)
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+define void @f2(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfmsxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %f3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %neg)
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+define void @f3(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfnmaxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %f3)
+ %negres = fsub fp128 0xL00000000000000008000000000000000, %res
+ store fp128 %negres, fp128 *%dst
+ ret void
+}
+
+define void @f4(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f4:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfnmsxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %f3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %neg)
+ %negres = fsub fp128 0xL00000000000000008000000000000000, %res
+ store fp128 %negres, fp128 *%dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-neg-02.ll b/test/CodeGen/SystemZ/fp-neg-02.ll
new file mode 100644
index 0000000000000..38fb3a58d404a
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-neg-02.ll
@@ -0,0 +1,41 @@
+; Test floating-point negation on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f32.
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: lcdfr %f0, %f0
+; CHECK: br %r14
+ %res = fsub float -0.0, %f
+ ret float %res
+}
+
+; Test f64.
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: lcdfr %f0, %f0
+; CHECK: br %r14
+ %res = fsub double -0.0, %f
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure negation would probably
+; be better implemented using an XI on the upper byte. Do some extra
+; processing so that using FPRs is unequivocally better.
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: wflcxb [[NEGREG1:%v[0-9]+]], [[REG1]]
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[NEGREG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %orig = load fp128 , fp128 *%ptr
+ %negzero = fpext float -0.0 to fp128
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %orig
+ %op2 = load fp128 , fp128 *%ptr2
+ %res = fdiv fp128 %neg, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-round-03.ll b/test/CodeGen/SystemZ/fp-round-03.ll
new file mode 100644
index 0000000000000..762e793701d1c
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-round-03.ll
@@ -0,0 +1,207 @@
+; Test rounding functions for z14 and above.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test rint for f32.
+declare float @llvm.rint.f32(float %f)
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: fiebra %f0, 0, %f0, 0
+; CHECK: br %r14
+ %res = call float @llvm.rint.f32(float %f)
+ ret float %res
+}
+
+; Test rint for f64.
+declare double @llvm.rint.f64(double %f)
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: fidbra %f0, 0, %f0, 0
+; CHECK: br %r14
+ %res = call double @llvm.rint.f64(double %f)
+ ret double %res
+}
+
+; Test rint for f128.
+declare fp128 @llvm.rint.f128(fp128 %f)
+define void @f3(fp128 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 0, 0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.rint.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test nearbyint for f32.
+declare float @llvm.nearbyint.f32(float %f)
+define float @f4(float %f) {
+; CHECK-LABEL: f4:
+; CHECK: fiebra %f0, 0, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.nearbyint.f32(float %f)
+ ret float %res
+}
+
+; Test nearbyint for f64.
+declare double @llvm.nearbyint.f64(double %f)
+define double @f5(double %f) {
+; CHECK-LABEL: f5:
+; CHECK: fidbra %f0, 0, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.nearbyint.f64(double %f)
+ ret double %res
+}
+
+; Test nearbyint for f128.
+declare fp128 @llvm.nearbyint.f128(fp128 %f)
+define void @f6(fp128 *%ptr) {
+; CHECK-LABEL: f6:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.nearbyint.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test floor for f32.
+declare float @llvm.floor.f32(float %f)
+define float @f7(float %f) {
+; CHECK-LABEL: f7:
+; CHECK: fiebra %f0, 7, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.floor.f32(float %f)
+ ret float %res
+}
+
+; Test floor for f64.
+declare double @llvm.floor.f64(double %f)
+define double @f8(double %f) {
+; CHECK-LABEL: f8:
+; CHECK: fidbra %f0, 7, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.floor.f64(double %f)
+ ret double %res
+}
+
+; Test floor for f128.
+declare fp128 @llvm.floor.f128(fp128 %f)
+define void @f9(fp128 *%ptr) {
+; CHECK-LABEL: f9:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 7
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.floor.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test ceil for f32.
+declare float @llvm.ceil.f32(float %f)
+define float @f10(float %f) {
+; CHECK-LABEL: f10:
+; CHECK: fiebra %f0, 6, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.ceil.f32(float %f)
+ ret float %res
+}
+
+; Test ceil for f64.
+declare double @llvm.ceil.f64(double %f)
+define double @f11(double %f) {
+; CHECK-LABEL: f11:
+; CHECK: fidbra %f0, 6, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.ceil.f64(double %f)
+ ret double %res
+}
+
+; Test ceil for f128.
+declare fp128 @llvm.ceil.f128(fp128 %f)
+define void @f12(fp128 *%ptr) {
+; CHECK-LABEL: f12:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 6
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.ceil.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test trunc for f32.
+declare float @llvm.trunc.f32(float %f)
+define float @f13(float %f) {
+; CHECK-LABEL: f13:
+; CHECK: fiebra %f0, 5, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.trunc.f32(float %f)
+ ret float %res
+}
+
+; Test trunc for f64.
+declare double @llvm.trunc.f64(double %f)
+define double @f14(double %f) {
+; CHECK-LABEL: f14:
+; CHECK: fidbra %f0, 5, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.trunc.f64(double %f)
+ ret double %res
+}
+
+; Test trunc for f128.
+declare fp128 @llvm.trunc.f128(fp128 %f)
+define void @f15(fp128 *%ptr) {
+; CHECK-LABEL: f15:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 5
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.trunc.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test round for f32.
+declare float @llvm.round.f32(float %f)
+define float @f16(float %f) {
+; CHECK-LABEL: f16:
+; CHECK: fiebra %f0, 1, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.round.f32(float %f)
+ ret float %res
+}
+
+; Test round for f64.
+declare double @llvm.round.f64(double %f)
+define double @f17(double %f) {
+; CHECK-LABEL: f17:
+; CHECK: fidbra %f0, 1, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.round.f64(double %f)
+ ret double %res
+}
+
+; Test round for f128.
+declare fp128 @llvm.round.f128(fp128 %f)
+define void @f18(fp128 *%ptr) {
+; CHECK-LABEL: f18:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 1
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.round.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
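
One table covers every CHECK pair above. The scalar forms are fiebra/fidbra %f0, <mode>, %f0, <m4> and the f128 form is wfixb <res>, <src>, <m4>, <mode>; m4 = 4 suppresses the IEEE-inexact exception, which is exactly the rint vs. nearbyint distinction, and the mode values follow the usual z/Architecture rounding-method encoding (the meanings column is background, not asserted by the test):

;   intrinsic   mode   m4   meaning
;   rint          0     0   current FPC mode, inexact reported
;   nearbyint     0     4   current FPC mode, inexact suppressed
;   floor         7     4   toward -infinity
;   ceil          6     4   toward +infinity
;   trunc         5     4   toward zero
;   round         1     4   nearest, ties away from zero
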
diff --git a/test/CodeGen/SystemZ/fp-sqrt-01.ll b/test/CodeGen/SystemZ/fp-sqrt-01.ll
index 3680207e7f207..85a46bc2d7fc8 100644
--- a/test/CodeGen/SystemZ/fp-sqrt-01.ll
+++ b/test/CodeGen/SystemZ/fp-sqrt-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit square root.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @llvm.sqrt.f32(float)
declare float @sqrtf(float)
@@ -77,7 +79,7 @@ define float @f6(float *%base, i64 %index) {
; to use SQEB if possible.
define void @f7(float *%ptr) {
; CHECK-LABEL: f7:
-; CHECK: sqeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK-SCALAR: sqeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
%val0 = load volatile float , float *%ptr
%val1 = load volatile float , float *%ptr
@@ -160,7 +162,7 @@ define float @f8(float %dummy, float %val) {
; CHECK: sqebr %f0, %f2
; CHECK: cebr %f0, %f0
; CHECK: bnor %r14
-; CHECK: ler %f0, %f2
+; CHECK: {{ler|ldr}} %f0, %f2
; CHECK: jg sqrtf@PLT
%res = tail call float @sqrtf(float %val)
ret float %res
diff --git a/test/CodeGen/SystemZ/fp-sqrt-04.ll b/test/CodeGen/SystemZ/fp-sqrt-04.ll
new file mode 100644
index 0000000000000..e0fb2569b39a0
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sqrt-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point square root on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare fp128 @llvm.sqrt.f128(fp128 %f)
+
+define void @f1(fp128 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfsqxb [[RES:%v[0-9]+]], [[REG]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f = load fp128, fp128 *%ptr
+ %res = call fp128 @llvm.sqrt.f128(fp128 %f)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-01.ll b/test/CodeGen/SystemZ/fp-sub-01.ll
index f4185ca3108dd..41f72e1810e98 100644
--- a/test/CodeGen/SystemZ/fp-sub-01.ll
+++ b/test/CodeGen/SystemZ/fp-sub-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit floating-point subtraction.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: seb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: seb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-sub-04.ll b/test/CodeGen/SystemZ/fp-sub-04.ll
new file mode 100644
index 0000000000000..5f88132664ef3
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sub-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point subtraction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfsxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fsub fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-add-17.ll b/test/CodeGen/SystemZ/int-add-17.ll
new file mode 100644
index 0000000000000..fd245871c6520
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-17.ll
@@ -0,0 +1,95 @@
+; Test additions between an i64 and a sign-extended i16 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check AGH with no displacement.
+define i64 @f1(i64 %a, i16 *%src) {
+; CHECK-LABEL: f1:
+; CHECK: agh %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i16, i16 *%src
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the aligned AGH range.
+define i64 @f2(i64 %a, i16 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: agh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262143
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %a, i16 *%src) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: agh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the negative aligned AGH range.
+define i64 @f4(i64 %a, i16 *%src) {
+; CHECK-LABEL: f4:
+; CHECK: agh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -1
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the low end of the AGH range.
+define i64 @f5(i64 %a, i16 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: agh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %a, i16 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: agh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check that AGH allows an index.
+define i64 @f7(i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f7:
+; CHECK: agh %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i16 *
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
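
The index constants in this file (and in int-mul-09.ll and int-sub-10.ll below, which reuse them) are the 20-bit signed displacement of the long-displacement instruction format worked through 2-byte elements:

;   displacement range: [-2^19, 2^19 - 1] = [-524288, 524287]
;   f2: index  262143 -> byte offset  524286   (highest even value in range)
;   f3: index  262144 -> byte offset  524288   (out of range -> agfi first)
;   f5: index -262144 -> byte offset -524288   (lowest value in range)
;   f6: index -262145 -> byte offset -524290   (out of range -> agfi first)
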
diff --git a/test/CodeGen/SystemZ/int-mul-09.ll b/test/CodeGen/SystemZ/int-mul-09.ll
new file mode 100644
index 0000000000000..3e384e72db5da
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-09.ll
@@ -0,0 +1,95 @@
+; Test multiplications between an i64 and a sign-extended i16 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check MGH with no displacement.
+define i64 @f1(i64 %a, i16 *%src) {
+; CHECK-LABEL: f1:
+; CHECK: mgh %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i16, i16 *%src
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the high end of the aligned MGH range.
+define i64 @f2(i64 %a, i16 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: mgh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262143
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %a, i16 *%src) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: mgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the high end of the negative aligned MGH range.
+define i64 @f4(i64 %a, i16 *%src) {
+; CHECK-LABEL: f4:
+; CHECK: mgh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -1
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the low end of the MGH range.
+define i64 @f5(i64 %a, i16 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: mgh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %a, i16 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: mgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check that MGH allows an index.
+define i64 @f7(i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f7:
+; CHECK: mgh %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i16 *
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
diff --git a/test/CodeGen/SystemZ/int-mul-10.ll b/test/CodeGen/SystemZ/int-mul-10.ll
new file mode 100644
index 0000000000000..a4d80af36a3c8
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-10.ll
@@ -0,0 +1,165 @@
+; Test signed high-part i64->i128 multiplications on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check sign-extended multiplication in which only the high part is used.
+define i64 @f1(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mgrk %r2, %r3, %r4
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check sign-extended multiplication in which only part of the high half
+; is used.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f2:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mgrk [[REG:%r[0-9]+]], %r3, %r4
+; CHECK: srlg %r2, [[REG]], 3
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 67
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check sign-extended multiplication in which the result is split into
+; high and low halves.
+define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mgrk %r2, %r3, %r4
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ %low = trunc i128 %mulx to i64
+ %or = or i64 %high, %low
+ ret i64 %or
+}
+
+; Check MG with no displacement.
+define i64 @f4(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f4:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mg %r2, 0(%r4)
+; CHECK: br %r14
+ %b = load i64 , i64 *%src
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the high end of the aligned MG range.
+define i64 @f5(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: mg %r2, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 65535
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the next doubleword up, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: mg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 65536
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the high end of the negative aligned MG range.
+define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f7:
+; CHECK: mg %r2, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 -1
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the low end of the MG range.
+define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f8:
+; CHECK: mg %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f9(i64 *%dest, i64 %a, i64 *%src) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524296
+; CHECK: mg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check that MG allows an index.
+define i64 @f10(i64 *%dest, i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: mg %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
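
f2's shift split is one line of arithmetic: mgrk leaves the full 128-bit signed product in an even/odd register pair, so taking bits 64 and up is free (that is the even register of the pair), and lshr i128 %mulx, 67 therefore decomposes as "use the high register, then shift right by 67 - 64 = 3" -- exactly the mgrk + srlg pair the CHECK lines expect.
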
diff --git a/test/CodeGen/SystemZ/int-mul-11.ll b/test/CodeGen/SystemZ/int-mul-11.ll
new file mode 100644
index 0000000000000..f262519825186
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-11.ll
@@ -0,0 +1,32 @@
+; Test three-operand multiplication instructions on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Check MSRKC.
+define i32 @f1(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: msrkc %r2, %r3, %r4
+; CHECK: br %r14
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check MSGRKC.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f2:
+; CHECK: msgrkc %r2, %r3, %r4
+; CHECK: br %r14
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Verify that we still use MSGFR for i32->i64 multiplies.
+define i64 @f3(i64 %a, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: msgfr %r2, %r3
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
diff --git a/test/CodeGen/SystemZ/int-sub-10.ll b/test/CodeGen/SystemZ/int-sub-10.ll
new file mode 100644
index 0000000000000..bf6638575e553
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-10.ll
@@ -0,0 +1,95 @@
+; Test subtractions of a sign-extended i16 from an i64 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
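+; SGH has a signed 20-bit displacement; the tests below probe both ends of
+; that range and the first offsets just outside it.
+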
+; Check SGH with no displacement.
+define i64 @f1(i64 %a, i16 *%src) {
+; CHECK-LABEL: f1:
+; CHECK: sgh %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i16, i16 *%src
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the aligned SGH range.
+define i64 @f2(i64 %a, i16 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: sgh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262143
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %a, i16 *%src) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: sgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the negative aligned SGH range.
+define i64 @f4(i64 %a, i16 *%src) {
+; CHECK-LABEL: f4:
+; CHECK: sgh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -1
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the low end of the SGH range.
+define i64 @f5(i64 %a, i16 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: sgh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %a, i16 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: sgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check that SGH allows an index.
+define i64 @f7(i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f7:
+; CHECK: sgh %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i16 *
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
diff --git a/test/CodeGen/SystemZ/tdc-07.ll b/test/CodeGen/SystemZ/tdc-07.ll
new file mode 100644
index 0000000000000..6651410e7c66f
--- /dev/null
+++ b/test/CodeGen/SystemZ/tdc-07.ll
@@ -0,0 +1,18 @@
+; Test the Test Data Class instruction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i32 @llvm.s390.tdc.f128(fp128, i64)
+
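+; The i64 operand of the intrinsic is the class-test mask, which becomes the
+; immediate of TCXB; the ipm/srl pair then extracts the resulting CC.
+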
+; Check the fp128 form of the intrinsic with its i32 result used directly.
+define i32 @f3(fp128 %x) {
+; CHECK-LABEL: f3:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: tcxb %f0, 123
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+ %res = call i32 @llvm.s390.tdc.f128(fp128 %x, i64 123)
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/SystemZ/vec-abs-06.ll b/test/CodeGen/SystemZ/vec-abs-06.ll
new file mode 100644
index 0000000000000..8eee1d9d2507a
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-abs-06.ll
@@ -0,0 +1,47 @@
+; Test f32 and v4f32 absolute on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @llvm.fabs.f32(float)
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
+
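+; The single-precision forms (vflpsb/vflnsb, and wflpsb/wflnsb for scalars
+; kept in vector registers) are new with z14; the negative-absolute tests
+; fold the fsub from -0.0 into the load-negative form.
+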
+; Test a plain absolute.
+define <4 x float> @f1(<4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vflpsb %v24, %v24
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.fabs.v4f32(<4 x float> %val)
+ ret <4 x float> %ret
+}
+
+; Test a negative absolute.
+define <4 x float> @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vflnsb %v24, %v24
+; CHECK: br %r14
+ %abs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %val)
+ %ret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %abs
+ ret <4 x float> %ret
+}
+
+; Test an f32 absolute that uses vector registers.
+define float @f3(<4 x float> %val) {
+; CHECK-LABEL: f3:
+; CHECK: wflpsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %ret = call float @llvm.fabs.f32(float %scalar)
+ ret float %ret
+}
+
+; Test an f32 negative absolute that uses vector registers.
+define float @f4(<4 x float> %val) {
+; CHECK-LABEL: f4:
+; CHECK: wflnsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %abs = call float @llvm.fabs.f32(float %scalar)
+ %ret = fsub float -0.0, %abs
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-add-02.ll b/test/CodeGen/SystemZ/vec-add-02.ll
new file mode 100644
index 0000000000000..97a9b84a063c4
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-add-02.ll
@@ -0,0 +1,24 @@
+; Test vector addition on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
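+; The w-prefixed forms read element 0 of their vector register operands, so
+; the extractelements below need no explicit moves.
+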
+; Test a v4f32 addition.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfasb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fadd <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 addition that uses vector registers.
+define float @f2(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfasb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fadd float %scalar1, %scalar2
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-and-04.ll b/test/CodeGen/SystemZ/vec-and-04.ll
new file mode 100644
index 0000000000000..e9355beb4296c
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-and-04.ll
@@ -0,0 +1,47 @@
+; Test vector NAND on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
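+; vnn is purely bitwise, so every element width maps to the same instruction;
+; the four tests below differ only in type.
+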
+; Test a v16i8 NAND.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <16 x i8> %val1, %val2
+ %not = xor <16 x i8> %ret, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <16 x i8> %not
+}
+
+; Test a v8i16 NAND.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <8 x i16> %val1, %val2
+ %not = xor <8 x i16> %ret, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %not
+}
+
+; Test a v4i32 NAND.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <4 x i32> %val1, %val2
+ %not = xor <4 x i32> %ret, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %not
+}
+
+; Test a v2i64 NAND.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <2 x i64> %val1, %val2
+ %not = xor <2 x i64> %ret, <i64 -1, i64 -1>
+ ret <2 x i64> %not
+}
diff --git a/test/CodeGen/SystemZ/vec-cmp-07.ll b/test/CodeGen/SystemZ/vec-cmp-07.ll
new file mode 100644
index 0000000000000..f272ba4bd7554
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-cmp-07.ll
@@ -0,0 +1,349 @@
+; Test f32 and v4f32 comparisons on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
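+; Only equal, high, and high-or-equal compares exist in hardware, so the
+; remaining predicates are formed by swapping operands, OR-ing two compares
+; (vo), or negating one (vno), as the patterns below check.
+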
+; Test oeq.
+define <4 x i32> @f1(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfcesb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oeq <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test one.
+define <4 x i32> @f2(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vo %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp one <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ogt.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vfchsb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test oge.
+define <4 x i32> @f4(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vfchesb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oge <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ole.
+define <4 x i32> @f5(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vfchesb %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ole <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test olt.
+define <4 x i32> @f6(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vfchsb %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = fcmp olt <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ueq.
+define <4 x i32> @f7(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f7:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vno %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ueq <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test une.
+define <4 x i32> @f8(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vfcesb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp une <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ugt.
+define <4 x i32> @f9(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ugt <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test uge.
+define <4 x i32> @f10(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uge <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ule.
+define <4 x i32> @f11(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f11:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ule <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ult.
+define <4 x i32> @f12(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f12:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ult <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ord.
+define <4 x i32> @f13(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f13:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vo %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ord <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test uno.
+define <4 x i32> @f14(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f14:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vno %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uno <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test oeq selects.
+define <4 x float> @f15(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f15:
+; CHECK: vfcesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oeq <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test one selects.
+define <4 x float> @f16(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f16:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp one <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ogt selects.
+define <4 x float> @f17(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f17:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test oge selects.
+define <4 x float> @f18(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f18:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oge <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ole selects.
+define <4 x float> @f19(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f19:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ole <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test olt selects.
+define <4 x float> @f20(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f20:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp olt <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ueq selects.
+define <4 x float> @f21(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f21:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ueq <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test une selects.
+define <4 x float> @f22(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f22:
+; CHECK: vfcesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp une <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ugt selects.
+define <4 x float> @f23(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f23:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ugt <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test uge selects.
+define <4 x float> @f24(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f24:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uge <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ule selects.
+define <4 x float> @f25(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f25:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ule <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ult selects.
+define <4 x float> @f26(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f26:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ult <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ord selects.
+define <4 x float> @f27(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f27:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ord <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test uno selects.
+define <4 x float> @f28(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f28:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uno <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test an f32 comparison that uses vector registers.
+define i64 @f29(i64 %a, i64 %b, float %f1, <4 x float> %vec) {
+; CHECK-LABEL: f29:
+; CHECK: wfcsb %f0, %v24
+; CHECK-NEXT: locgrne %r2, %r3
+; CHECK: br %r14
+ %f2 = extractelement <4 x float> %vec, i32 0
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/vec-ctpop-02.ll b/test/CodeGen/SystemZ/vec-ctpop-02.ll
new file mode 100644
index 0000000000000..ee50e88d04301
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-ctpop-02.ll
@@ -0,0 +1,45 @@
+; Test vector population-count instruction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
+
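+; z13 only provided the byte form of vpopct; z14 adds the halfword, word and
+; doubleword element sizes used here.
+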
+define <16 x i8> @f1(<16 x i8> %a) {
+; CHECK-LABEL: f1:
+; CHECK: vpopctb %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
+ ret <16 x i8> %popcnt
+}
+
+define <8 x i16> @f2(<8 x i16> %a) {
+; CHECK-LABEL: f2:
+; CHECK: vpopcth %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
+ ret <8 x i16> %popcnt
+}
+
+define <4 x i32> @f3(<4 x i32> %a) {
+; CHECK-LABEL: f3:
+; CHECK: vpopctf %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+ ret <4 x i32> %popcnt
+}
+
+define <2 x i64> @f4(<2 x i64> %a) {
+; CHECK-LABEL: f4:
+; CHECK: vpopctg %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
+ ret <2 x i64> %popcnt
+}
+
diff --git a/test/CodeGen/SystemZ/vec-div-02.ll b/test/CodeGen/SystemZ/vec-div-02.ll
new file mode 100644
index 0000000000000..74e3b5148ad53
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-div-02.ll
@@ -0,0 +1,24 @@
+; Test vector division on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 division.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfdsb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fdiv <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 division that uses vector registers.
+define float @f2(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfdsb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fdiv float %scalar1, %scalar2
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-intrinsics.ll b/test/CodeGen/SystemZ/vec-intrinsics-01.ll
index 6f5eb0691aa83..6f5eb0691aa83 100644
--- a/test/CodeGen/SystemZ/vec-intrinsics.ll
+++ b/test/CodeGen/SystemZ/vec-intrinsics-01.ll
diff --git a/test/CodeGen/SystemZ/vec-intrinsics-02.ll b/test/CodeGen/SystemZ/vec-intrinsics-02.ll
new file mode 100644
index 0000000000000..84c6a07840318
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-intrinsics-02.ll
@@ -0,0 +1,441 @@
+; Test vector intrinsics added with z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <2 x i64> @llvm.s390.vbperm(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.s390.vmslg(<2 x i64>, <2 x i64>, <16 x i8>, i32)
+declare <16 x i8> @llvm.s390.vlrl(i32, i8 *)
+declare void @llvm.s390.vstrl(<16 x i8>, i32, i8 *)
+
+declare {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float>, <4 x float>)
+declare {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float>, <4 x float>)
+declare {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float>, <4 x float>)
+declare {<4 x i32>, i32} @llvm.s390.vftcisb(<4 x float>, i32)
+declare <4 x float> @llvm.s390.vfisb(<4 x float>, i32, i32)
+
+declare <2 x double> @llvm.s390.vfmaxdb(<2 x double>, <2 x double>, i32)
+declare <2 x double> @llvm.s390.vfmindb(<2 x double>, <2 x double>, i32)
+declare <4 x float> @llvm.s390.vfmaxsb(<4 x float>, <4 x float>, i32)
+declare <4 x float> @llvm.s390.vfminsb(<4 x float>, <4 x float>, i32)
+
+; VBPERM.
+define <2 x i64> @test_vbperm(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vbperm:
+; CHECK: vbperm %v24, %v24, %v26
+; CHECK: br %r14
+ %res = call <2 x i64> @llvm.s390.vbperm(<16 x i8> %a, <16 x i8> %b)
+ ret <2 x i64> %res
+}
+
+; VMSLG with no shifts.
+define <16 x i8> @test_vmslg1(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmslg1:
+; CHECK: vmslg %v24, %v24, %v26, %v28, 0
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vmslg(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c, i32 0)
+ ret <16 x i8> %res
+}
+
+; VMSLG with both shifts.
+define <16 x i8> @test_vmslg2(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmslg2:
+; CHECK: vmslg %v24, %v24, %v26, %v28, 12
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vmslg(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c, i32 12)
+ ret <16 x i8> %res
+}
+
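+; The length-controlled loads and stores below take a 12-bit unsigned
+; displacement and no index register, which the out-of-range and indexed
+; tests rely on.
+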
+; VLRLR with the lowest in-range displacement.
+define <16 x i8> @test_vlrlr1(i8 *%ptr, i32 %length) {
+; CHECK-LABEL: test_vlrlr1:
+; CHECK: vlrlr %v24, %r3, 0(%r2)
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRLR with the highest in-range displacement.
+define <16 x i8> @test_vlrlr2(i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vlrlr2:
+; CHECK: vlrlr %v24, %r3, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRLR with an out-of-range displacement.
+define <16 x i8> @test_vlrlr3(i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vlrlr3:
+; CHECK: vlrlr %v24, %r3, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; Check that VLRLR doesn't allow an index.
+define <16 x i8> @test_vlrlr4(i8 *%base, i64 %index, i32 %length) {
+; CHECK-LABEL: test_vlrlr4:
+; CHECK: vlrlr %v24, %r4, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRL with the lowest in-range displacement.
+define <16 x i8> @test_vlrl1(i8 *%ptr) {
+; CHECK-LABEL: test_vlrl1:
+; CHECK: vlrl %v24, 0(%r2), 0
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRL with the highest in-range displacement.
+define <16 x i8> @test_vlrl2(i8 *%base) {
+; CHECK-LABEL: test_vlrl2:
+; CHECK: vlrl %v24, 4095(%r2), 0
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRL with an out-of-range displacement.
+define <16 x i8> @test_vlrl3(i8 *%base) {
+; CHECK-LABEL: test_vlrl3:
+; CHECK: vlrl %v24, 0({{%r[1-5]}}), 0
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; Check that VLRL doesn't allow an index.
+define <16 x i8> @test_vlrl4(i8 *%base, i64 %index) {
+; CHECK-LABEL: test_vlrl4:
+; CHECK: vlrl %v24, 0({{%r[1-5]}}), 0
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VSTRLR with the lowest in-range displacement.
+define void @test_vstrlr1(<16 x i8> %vec, i8 *%ptr, i32 %length) {
+; CHECK-LABEL: test_vstrlr1:
+; CHECK: vstrlr %v24, %r3, 0(%r2)
+; CHECK: br %r14
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; VSTRLR with the highest in-range displacement.
+define void @test_vstrlr2(<16 x i8> %vec, i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vstrlr2:
+; CHECK: vstrlr %v24, %r3, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; VSTRLR with an out-of-range displacement.
+define void @test_vstrlr3(<16 x i8> %vec, i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vstrlr3:
+; CHECK: vstrlr %v24, %r3, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; Check that VSTRLR doesn't allow an index.
+define void @test_vstrlr4(<16 x i8> %vec, i8 *%base, i64 %index, i32 %length) {
+; CHECK-LABEL: test_vstrlr4:
+; CHECK: vstrlr %v24, %r4, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; VSTRL with the lowest in-range displacement.
+define void @test_vstrl1(<16 x i8> %vec, i8 *%ptr) {
+; CHECK-LABEL: test_vstrl1:
+; CHECK: vstrl %v24, 0(%r2), 8
+; CHECK: br %r14
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; VSTRL with the highest in-range displacement.
+define void @test_vstrl2(<16 x i8> %vec, i8 *%base) {
+; CHECK-LABEL: test_vstrl2:
+; CHECK: vstrl %v24, 4095(%r2), 8
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; VSTRL with an out-of-range displacement.
+define void @test_vstrl3(<16 x i8> %vec, i8 *%base) {
+; CHECK-LABEL: test_vstrl3:
+; CHECK: vstrl %v24, 0({{%r[1-5]}}), 8
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; Check that VSTRL doesn't allow an index.
+define void @test_vstrl4(<16 x i8> %vec, i8 *%base, i64 %index) {
+; CHECK-LABEL: test_vstrl4:
+; CHECK: vstrl %v24, 0({{%r[1-5]}}), 8
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; VFCESBS with no processing of the result.
+define i32 @test_vfcesbs(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfcesbs:
+; CHECK: vfcesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFCESBS, returning 1 if any element is equal (CC != 3).
+define i32 @test_vfcesbs_any_bool(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfcesbs_any_bool:
+; CHECK: vfcesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: afi %r2, -536870912
+; CHECK: srl %r2, 31
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp ne i32 %res, 3
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFCESBS, storing to %ptr if any element is equal.
+define <4 x i32> @test_vfcesbs_any_store(<4 x float> %a, <4 x float> %b,
+ i32 *%ptr) {
+; CHECK-LABEL: test_vfcesbs_any_store:
+; CHECK-NOT: %r
+; CHECK: vfcesbs %v24, %v24, %v26
+; CHECK-NEXT: {{bor|bnler}} %r14
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 0
+ %cc = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp ule i32 %cc, 2
+ br i1 %cmp, label %store, label %exit
+
+store:
+ store i32 0, i32 *%ptr
+ br label %exit
+
+exit:
+ ret <4 x i32> %res
+}
+
+; VFCHSBS with no processing of the result.
+define i32 @test_vfchsbs(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchsbs:
+; CHECK: vfchsbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFCHSBS, returning 1 if not all elements are higher.
+define i32 @test_vfchsbs_notall_bool(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchsbs_notall_bool:
+; CHECK: vfchsbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risblg %r2, [[REG]], 31, 159, 36
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp sge i32 %res, 1
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFCHSBS, storing to %ptr if not all elements are higher.
+define <4 x i32> @test_vfchsbs_notall_store(<4 x float> %a, <4 x float> %b,
+ i32 *%ptr) {
+; CHECK-LABEL: test_vfchsbs_notall_store:
+; CHECK-NOT: %r
+; CHECK: vfchsbs %v24, %v24, %v26
+; CHECK-NEXT: {{bher|ber}} %r14
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 0
+ %cc = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp ugt i32 %cc, 0
+ br i1 %cmp, label %store, label %exit
+
+store:
+ store i32 0, i32 *%ptr
+ br label %exit
+
+exit:
+ ret <4 x i32> %res
+}
+
+; VFCHESBS with no processing of the result.
+define i32 @test_vfchesbs(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchesbs:
+; CHECK: vfchesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFCHESBS, returning 1 if no element is higher or equal.
+define i32 @test_vfchesbs_none_bool(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchesbs_none_bool:
+; CHECK: vfchesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risblg %r2, [[REG]], 31, 159, 35
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp eq i32 %res, 3
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFCHESBS, storing to %ptr if no element is higher or equal.
+define <4 x i32> @test_vfchesbs_none_store(<4 x float> %a, <4 x float> %b,
+ i32 *%ptr) {
+; CHECK-LABEL: test_vfchesbs_none_store:
+; CHECK-NOT: %r
+; CHECK: vfchesbs %v24, %v24, %v26
+; CHECK-NEXT: {{bnor|bler}} %r14
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 0
+ %cc = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp uge i32 %cc, 3
+ br i1 %cmp, label %store, label %exit
+
+store:
+ store i32 0, i32 *%ptr
+ br label %exit
+
+exit:
+ ret <4 x i32> %res
+}
+
+; VFTCISB with the lowest useful class selector and no processing of the result.
+define i32 @test_vftcisb(<4 x float> %a) {
+; CHECK-LABEL: test_vftcisb:
+; CHECK: vftcisb {{%v[0-9]+}}, %v24, 1
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vftcisb(<4 x float> %a, i32 1)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFTCISB with the highest useful class selector, returning 1 if all elements
+; have the right class (CC == 0).
+define i32 @test_vftcisb_all_bool(<4 x float> %a) {
+; CHECK-LABEL: test_vftcisb_all_bool:
+; CHECK: vftcisb {{%v[0-9]+}}, %v24, 4094
+; CHECK: afi %r2, -268435456
+; CHECK: srl %r2, 31
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vftcisb(<4 x float> %a, i32 4094)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp eq i32 %res, 0
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFISB with a rounding mode not usable via standard intrinsics.
+define <4 x float> @test_vfisb_0_4(<4 x float> %a) {
+; CHECK-LABEL: test_vfisb_0_4:
+; CHECK: vfisb %v24, %v24, 0, 4
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfisb(<4 x float> %a, i32 0, i32 4)
+ ret <4 x float> %res
+}
+
+; VFISB with IEEE-inexact exception suppressed.
+define <4 x float> @test_vfisb_4_0(<4 x float> %a) {
+; CHECK-LABEL: test_vfisb_4_0:
+; CHECK: vfisb %v24, %v24, 4, 0
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfisb(<4 x float> %a, i32 4, i32 0)
+ ret <4 x float> %res
+}
+
+; VFMAXDB.
+define <2 x double> @test_vfmaxdb(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_vfmaxdb:
+; CHECK: vfmaxdb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <2 x double> @llvm.s390.vfmaxdb(<2 x double> %a, <2 x double> %b, i32 4)
+ ret <2 x double> %res
+}
+
+; VFMINDB.
+define <2 x double> @test_vfmindb(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_vfmindb:
+; CHECK: vfmindb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <2 x double> @llvm.s390.vfmindb(<2 x double> %a, <2 x double> %b, i32 4)
+ ret <2 x double> %res
+}
+
+; VFMAXSB.
+define <4 x float> @test_vfmaxsb(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfmaxsb:
+; CHECK: vfmaxsb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfmaxsb(<4 x float> %a, <4 x float> %b, i32 4)
+ ret <4 x float> %res
+}
+
+; VFMINSB.
+define <4 x float> @test_vfminsb(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfminsb:
+; CHECK: vfminsb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfminsb(<4 x float> %a, <4 x float> %b, i32 4)
+ ret <4 x float> %res
+}
+
diff --git a/test/CodeGen/SystemZ/vec-max-05.ll b/test/CodeGen/SystemZ/vec-max-05.ll
new file mode 100644
index 0000000000000..591d3bf36f168
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-max-05.ll
@@ -0,0 +1,175 @@
+; Test vector maximum on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare double @fmax(double, double)
+declare double @llvm.maxnum.f64(double, double)
+declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
+
+declare float @fmaxf(float, float)
+declare float @llvm.maxnum.f32(float, float)
+declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+
+declare fp128 @fmaxl(fp128, fp128)
+declare fp128 @llvm.maxnum.f128(fp128, fp128)
+
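+; The trailing immediate picks the semantics: 4 gives maxnum (numbers
+; preferred over NaNs) and 1 gives the NaN-propagating "maxnan" form that
+; the unordered compare/select patterns map to.
+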
+; Test the fmax library function.
+define double @f1(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f1:
+; CHECK: wfmaxdb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @fmax(double %val1, double %val2) readnone
+ ret double %ret
+}
+
+; Test the f64 maxnum intrinsic.
+define double @f2(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfmaxdb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @llvm.maxnum.f64(double %val1, double %val2)
+ ret double %ret
+}
+
+; Test an f64 constant compare/select resulting in maxnum.
+define double @f3(double %dummy, double %val) {
+; CHECK-LABEL: f3:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmaxdb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp ogt double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test an f64 constant compare/select resulting in maxnan.
+define double @f4(double %dummy, double %val) {
+; CHECK-LABEL: f4:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmaxdb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ugt double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test the v2f64 maxnum intrinsic.
+define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vfmaxdb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %val1, <2 x double> %val2)
+ ret <2 x double> %ret
+}
+
+; Test the fmaxf library function.
+define float @f11(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f11:
+; CHECK: wfmaxsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @fmaxf(float %val1, float %val2) readnone
+ ret float %ret
+}
+
+; Test the f32 maxnum intrinsic.
+define float @f12(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f12:
+; CHECK: wfmaxsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @llvm.maxnum.f32(float %val1, float %val2)
+ ret float %ret
+}
+
+; Test an f32 constant compare/select resulting in maxnum.
+define float @f13(float %dummy, float %val) {
+; CHECK-LABEL: f13:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfmaxsb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp ogt float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test an f32 constant compare/select resulting in maxnan.
+define float @f14(float %dummy, float %val) {
+; CHECK-LABEL: f14:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfmaxsb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ugt float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test the v4f32 maxnum intrinsic.
+define <4 x float> @f15(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f15:
+; CHECK: vfmaxsb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %val1, <4 x float> %val2)
+ ret <4 x float> %ret
+}
+
+; Test the fmaxl library function.
+define void @f21(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f21:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @fmaxl(fp128 %val1, fp128 %val2) readnone
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test the f128 maxnum intrinsic.
+define void @f22(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f22:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @llvm.maxnum.f128(fp128 %val1, fp128 %val2)
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test an f128 constant compare/select resulting in maxnum.
+define void @f23(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f23:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp ogt fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test an f128 constant compare/select resulting in maxnan.
+define void @f24(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f24:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 1
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp ugt fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/vec-min-05.ll b/test/CodeGen/SystemZ/vec-min-05.ll
new file mode 100644
index 0000000000000..3eef9016cd087
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-min-05.ll
@@ -0,0 +1,175 @@
+; Test vector minimum on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare double @fmin(double, double)
+declare double @llvm.minnum.f64(double, double)
+declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
+
+declare float @fminf(float, float)
+declare float @llvm.minnum.f32(float, float)
+declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
+
+declare fp128 @fminl(fp128, fp128)
+declare fp128 @llvm.minnum.f128(fp128, fp128)
+
+; Test the fmin library function.
+define double @f1(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f1:
+; CHECK: wfmindb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @fmin(double %val1, double %val2) readnone
+ ret double %ret
+}
+
+; Test the f64 minnum intrinsic.
+define double @f2(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfmindb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @llvm.minnum.f64(double %val1, double %val2)
+ ret double %ret
+}
+
+; Test an f64 constant compare/select resulting in minnum.
+define double @f3(double %dummy, double %val) {
+; CHECK-LABEL: f3:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmindb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp olt double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test an f64 constant compare/select resulting in minnan.
+define double @f4(double %dummy, double %val) {
+; CHECK-LABEL: f4:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmindb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ult double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test the v2f64 minnum intrinsic.
+define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vfmindb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <2 x double> @llvm.minnum.v2f64(<2 x double> %val1, <2 x double> %val2)
+ ret <2 x double> %ret
+}
+
+; Test the fminf library function.
+define float @f11(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f11:
+; CHECK: wfminsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @fminf(float %val1, float %val2) readnone
+ ret float %ret
+}
+
+; Test the f32 minnum intrinsic.
+define float @f12(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f12:
+; CHECK: wfminsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @llvm.minnum.f32(float %val1, float %val2)
+ ret float %ret
+}
+
+; Test an f32 constant compare/select resulting in minnum.
+define float @f13(float %dummy, float %val) {
+; CHECK-LABEL: f13:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfminsb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp olt float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test an f32 constant compare/select resulting in minnan.
+define float @f14(float %dummy, float %val) {
+; CHECK-LABEL: f14:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfminsb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ult float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test the v4f32 minnum intrinsic.
+define <4 x float> @f15(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f15:
+; CHECK: vfminsb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.minnum.v4f32(<4 x float> %val1, <4 x float> %val2)
+ ret <4 x float> %ret
+}
+
+; Test the fminl library function.
+define void @f21(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f21:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @fminl(fp128 %val1, fp128 %val2) readnone
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test the f128 minnum intrinsic.
+define void @f22(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f22:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @llvm.minnum.f128(fp128 %val1, fp128 %val2)
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test an f128 constant compare/select resulting in minnum.
+define void @f23(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f23:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp olt fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test an f128 constant compare/select resulting in minnan.
+define void @f24(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f24:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 1
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp ult fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/vec-move-18.ll b/test/CodeGen/SystemZ/vec-move-18.ll
new file mode 100644
index 0000000000000..5d3d09d83ef15
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-move-18.ll
@@ -0,0 +1,24 @@
+; Test insertions of memory values into a zeroed vector on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
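+; VLLEZLF loads a word into the leftmost element and zeroes the rest of the
+; vector, matching an insertelement into zeroinitializer at index 0.
+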
+; Test VLLEZLF.
+define <4 x i32> @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vllezlf %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test VLLEZLF with a float.
+define <4 x float> @f2(float *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vllezlf %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load float, float *%ptr
+ %ret = insertelement <4 x float> zeroinitializer, float %val, i32 0
+ ret <4 x float> %ret
+}
+
diff --git a/test/CodeGen/SystemZ/vec-mul-03.ll b/test/CodeGen/SystemZ/vec-mul-03.ll
new file mode 100644
index 0000000000000..3733db9fb339e
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-mul-03.ll
@@ -0,0 +1,24 @@
+; Test vector multiplication on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 multiplication.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfmsb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fmul <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 multiplication that uses vector registers.
+define float @f2(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfmsb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fmul float %scalar1, %scalar2
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-mul-04.ll b/test/CodeGen/SystemZ/vec-mul-04.ll
new file mode 100644
index 0000000000000..d96f0b6a745a8
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-mul-04.ll
@@ -0,0 +1,31 @@
+; Test vector multiply-and-add on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+; Test a v4f32 multiply-and-add.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f1:
+; CHECK: vfmasb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %val3)
+ ret <4 x float> %ret
+}
+
+; Test a v4f32 multiply-and-subtract.
+define <4 x float> @f2(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f2:
+; CHECK: vfmssb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %negval3 = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %val3
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %negval3)
+ ret <4 x float> %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-mul-05.ll b/test/CodeGen/SystemZ/vec-mul-05.ll
new file mode 100644
index 0000000000000..90a1f7a7efdf8
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-mul-05.ll
@@ -0,0 +1,63 @@
+; Test vector negative multiply-and-add on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
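+; vfnma/vfnms negate the whole result rather than an input, so each test
+; negates %ret (and, for the subtract forms, %val3 as well).
+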
+; Test a v2f64 negative multiply-and-add.
+define <2 x double> @f1(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2, <2 x double> %val3) {
+; CHECK-LABEL: f1:
+; CHECK: vfnmadb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %ret = call <2 x double> @llvm.fma.v2f64 (<2 x double> %val1,
+ <2 x double> %val2,
+ <2 x double> %val3)
+ %negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
+ ret <2 x double> %negret
+}
+
+; Test a v2f64 negative multiply-and-subtract.
+define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2, <2 x double> %val3) {
+; CHECK-LABEL: f2:
+; CHECK: vfnmsdb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %negval3 = fsub <2 x double> <double -0.0, double -0.0>, %val3
+ %ret = call <2 x double> @llvm.fma.v2f64 (<2 x double> %val1,
+ <2 x double> %val2,
+ <2 x double> %negval3)
+ %negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
+ ret <2 x double> %negret
+}
+
+; Test a v4f32 negative multiply-and-add.
+define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f3:
+; CHECK: vfnmasb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %val3)
+ %negret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %ret
+ ret <4 x float> %negret
+}
+
+; Test a v4f32 negative multiply-and-subtract.
+define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f4:
+; CHECK: vfnmssb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %negval3 = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %val3
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %negval3)
+ %negret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %ret
+ ret <4 x float> %negret
+}
diff --git a/test/CodeGen/SystemZ/vec-neg-02.ll b/test/CodeGen/SystemZ/vec-neg-02.ll
new file mode 100644
index 0000000000000..07ce037542fde
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-neg-02.ll
@@ -0,0 +1,23 @@
+; Test vector negation on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 negation.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vflcsb %v24, %v26
+; CHECK: br %r14
+ %ret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %val
+ ret <4 x float> %ret
+}
+
+; Test an f32 negation that uses vector registers.
+define float @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: wflcsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %ret = fsub float -0.0, %scalar
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-or-03.ll b/test/CodeGen/SystemZ/vec-or-03.ll
new file mode 100644
index 0000000000000..010629d880d13
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-or-03.ll
@@ -0,0 +1,91 @@
+; Test vector OR-NOT on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
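+; VOC computes the OR of its second operand with the complement of its
+; third, so the operand order flips depending on which input was inverted.
+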
+; Test a v16i8 OR-NOT.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <16 x i8> %val2, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %ret = or <16 x i8> %val1, %not
+ ret <16 x i8> %ret
+}
+
+; ...and again with the reverse.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <16 x i8> %val1, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %ret = or <16 x i8> %not, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 OR-NOT.
+define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <8 x i16> %val2, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret = or <8 x i16> %val1, %not
+ ret <8 x i16> %ret
+}
+
+; ...and again with the reverse.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <8 x i16> %val1, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret = or <8 x i16> %not, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 OR-NOT.
+define <4 x i32> @f5(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <4 x i32> %val2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret = or <4 x i32> %val1, %not
+ ret <4 x i32> %ret
+}
+
+; ...and again with the reverse.
+define <4 x i32> @f6(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <4 x i32> %val1, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret = or <4 x i32> %not, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 OR-NOT.
+define <2 x i64> @f7(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <2 x i64> %val2, <i64 -1, i64 -1>
+ %ret = or <2 x i64> %val1, %not
+ ret <2 x i64> %ret
+}
+
+; ...and again with the reverse.
+define <2 x i64> @f8(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <2 x i64> %val1, <i64 -1, i64 -1>
+ %ret = or <2 x i64> %not, %val2
+ ret <2 x i64> %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-round-02.ll b/test/CodeGen/SystemZ/vec-round-02.ll
new file mode 100644
index 0000000000000..bcd66ea803d1b
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-round-02.ll
@@ -0,0 +1,118 @@
+; Test v4f32 rounding on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
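+; The two immediates of vfisb/wfisb are the inexact-suppression flag (4) and
+; the rounding mode (0 = current mode, 1 = to nearest with ties away from
+; zero, 5 = toward zero, 6 = toward +inf, 7 = toward -inf).
+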
+declare float @llvm.rint.f32(float)
+declare float @llvm.nearbyint.f32(float)
+declare float @llvm.floor.f32(float)
+declare float @llvm.ceil.f32(float)
+declare float @llvm.trunc.f32(float)
+declare float @llvm.round.f32(float)
+declare <4 x float> @llvm.rint.v4f32(<4 x float>)
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
+declare <4 x float> @llvm.floor.v4f32(<4 x float>)
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
+declare <4 x float> @llvm.round.v4f32(<4 x float>)
+
+define <4 x float> @f1(<4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vfisb %v24, %v24, 0, 0
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.rint.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vfisb %v24, %v24, 4, 0
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f3(<4 x float> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vfisb %v24, %v24, 4, 7
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.floor.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f4(<4 x float> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vfisb %v24, %v24, 4, 6
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.ceil.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f5(<4 x float> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vfisb %v24, %v24, 4, 5
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f6(<4 x float> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vfisb %v24, %v24, 4, 1
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.round.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define float @f7(<4 x float> %val) {
+; CHECK-LABEL: f7:
+; CHECK: wfisb %f0, %v24, 0, 0
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.rint.f32(float %scalar)
+ ret float %res
+}
+
+define float @f8(<4 x float> %val) {
+; CHECK-LABEL: f8:
+; CHECK: wfisb %f0, %v24, 4, 0
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.nearbyint.f32(float %scalar)
+ ret float %res
+}
+
+define float @f9(<4 x float> %val) {
+; CHECK-LABEL: f9:
+; CHECK: wfisb %f0, %v24, 4, 7
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.floor.f32(float %scalar)
+ ret float %res
+}
+
+define float @f10(<4 x float> %val) {
+; CHECK-LABEL: f10:
+; CHECK: wfisb %f0, %v24, 4, 6
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.ceil.f32(float %scalar)
+ ret float %res
+}
+
+define float @f11(<4 x float> %val) {
+; CHECK-LABEL: f11:
+; CHECK: wfisb %f0, %v24, 4, 5
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.trunc.f32(float %scalar)
+ ret float %res
+}
+
+define float @f12(<4 x float> %val) {
+; CHECK-LABEL: f12:
+; CHECK: wfisb %f0, %v24, 4, 1
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.round.f32(float %scalar)
+ ret float %res
+}
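
For readers decoding the immediates above: pairing each intrinsic with its check suggests the second operand selects the rounding mode and the first (4) suppresses inexact exceptions, which is exactly what separates nearbyint from rint in LLVM's intrinsic semantics. This is an editorial reading inferred from the tests themselves, not a restatement of the instruction reference:

; Inferred vfisb/wfisb operand map (assumed field meanings):
;   rint      -> 0, 0   current FPC mode, inexact may signal
;   nearbyint -> 4, 0   current FPC mode, inexact suppressed
;   round     -> 4, 1   to nearest, ties away from zero
;   trunc     -> 4, 5   toward zero
;   ceil      -> 4, 6   toward +infinity
;   floor     -> 4, 7   toward -infinity
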
diff --git a/test/CodeGen/SystemZ/vec-sqrt-02.ll b/test/CodeGen/SystemZ/vec-sqrt-02.ll
new file mode 100644
index 0000000000000..6970d9db66988
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-sqrt-02.ll
@@ -0,0 +1,23 @@
+; Test f32 and v4f32 square root on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @llvm.sqrt.f32(float)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
+
+define <4 x float> @f1(<4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vfsqsb %v24, %v24
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %val)
+ ret <4 x float> %ret
+}
+
+define float @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: wfsqsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %ret = call float @llvm.sqrt.f32(float %scalar)
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-sub-02.ll b/test/CodeGen/SystemZ/vec-sub-02.ll
new file mode 100644
index 0000000000000..83c76b5d4aa61
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-sub-02.ll
@@ -0,0 +1,31 @@
+; Test vector subtraction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 subtraction.
+define <4 x float> @f6(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vfssb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fsub <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 subtraction that uses vector registers.
+define float @f7(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: wfssb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fsub float %scalar1, %scalar2
+ ret float %ret
+}
+
+; Test a v2f32 subtraction, which gets promoted to v4f32.
+define <2 x float> @f14(<2 x float> %val1, <2 x float> %val2) {
+; No particular output expected, but must compile.
+ %ret = fsub <2 x float> %val1, %val2
+ ret <2 x float> %ret
+}
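
As a sketch of what the promotion comment in f14 means: type legalization widens the illegal v2f32 operands to v4f32 with undef upper lanes, subtracts there, and extracts the low two lanes back. Illustrative IR only; the exact shuffle form is an assumption about legalization, not something the test checks:

define <2 x float> @f14_widened(<2 x float> %val1, <2 x float> %val2) {
  %wide1 = shufflevector <2 x float> %val1, <2 x float> undef,
                         <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  %wide2 = shufflevector <2 x float> %val2, <2 x float> undef,
                         <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  %wsub = fsub <4 x float> %wide1, %wide2   ; legal v4f32 operation
  %ret = shufflevector <4 x float> %wsub, <4 x float> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x float> %ret
}
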
diff --git a/test/CodeGen/SystemZ/vec-xor-02.ll b/test/CodeGen/SystemZ/vec-xor-02.ll
new file mode 100644
index 0000000000000..b4b5a96ba2545
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-xor-02.ll
@@ -0,0 +1,47 @@
+; Test vector NOT-XOR on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v16i8 NOT-XOR.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <16 x i8> %val1, %val2
+ %not = xor <16 x i8> %ret, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <16 x i8> %not
+}
+
+; Test a v8i16 NOT-XOR.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <8 x i16> %val1, %val2
+ %not = xor <8 x i16> %ret, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %not
+}
+
+; Test a v4i32 NOT-XOR.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <4 x i32> %val1, %val2
+ %not = xor <4 x i32> %ret, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %not
+}
+
+; Test a v2i64 NOT-XOR.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <2 x i64> %val1, %val2
+ %not = xor <2 x i64> %ret, <i64 -1, i64 -1>
+ ret <2 x i64> %not
+}
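
All four functions here invert the result of the xor. Since xor %a, (xor %b, -1) computes the same value as the inverted xor, the commuted form should presumably fold to vnx as well; a sketch of that variant (an assumption about the pattern matcher, not something these checks verify):

define <4 x i32> @xnor_commuted(<4 x i32> %val1, <4 x i32> %val2) {
  %notb = xor <4 x i32> %val2, <i32 -1, i32 -1, i32 -1, i32 -1>
  %ret = xor <4 x i32> %val1, %notb    ; same value as ~(%val1 ^ %val2)
  ret <4 x i32> %ret
}
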
diff --git a/test/CodeGen/Thumb/litpoolremat.ll b/test/CodeGen/Thumb/litpoolremat.ll
new file mode 100644
index 0000000000000..6ed9b0c2a7ce4
--- /dev/null
+++ b/test/CodeGen/Thumb/litpoolremat.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
+
+declare void @consume_value(i32) #1
+
+declare i32 @get_value(...) #1
+
+declare void @consume_three_values(i32, i32, i32) #1
+
+; Function Attrs: nounwind uwtable
+define void @should_not_spill() #0 {
+ tail call void @consume_value(i32 1764) #2
+ %1 = tail call i32 (...) @get_value() #2
+ %2 = tail call i32 (...) @get_value() #2
+ %3 = tail call i32 (...) @get_value() #2
+ tail call void @consume_value(i32 %1) #2
+ tail call void @consume_value(i32 %2) #2
+ tail call void @consume_value(i32 %3) #2
+ tail call void @consume_value(i32 1764) #2
+ tail call void @consume_three_values(i32 %1, i32 %2, i32 %3) #2
+ ret void
+}
+
+; CHECK: ldr r0, LCPI0_0
+; CHECK-NOT: str r0
+; CHECK: bl
+; CHECK: ldr r0, LCPI0_0
+; CHECK-LABEL: LCPI0_0:
+; CHECK-NEXT: .long 1764
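
The checks encode the point of the test: 1764 lives in the constant pool, so instead of keeping it in a register across the calls (which would force a spill), the backend can rematerialize it by loading LCPI0_0 again. A call result is the opposite case; it cannot be recomputed and really must survive the intervening call, as in this hypothetical contrast (reusing the declarations above, not part of the test):

define void @must_preserve() {
  %v = call i32 (...) @get_value()
  call void @consume_value(i32 %v)   ; %v is live across this call...
  call void @consume_value(i32 %v)   ; ...and cannot be re-derived like 1764
  ret void
}
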
diff --git a/test/CodeGen/Thumb/select.ll b/test/CodeGen/Thumb/select.ll
index fe69a39e350c8..75dbeab5ad0f7 100644
--- a/test/CodeGen/Thumb/select.ll
+++ b/test/CodeGen/Thumb/select.ll
@@ -74,9 +74,9 @@ define double @f7(double %a, double %b) {
}
; CHECK-LABEL: f7:
; CHECK: blt
-; CHECK: blt
+; CHECK: {{blt|bge}}
; CHECK: __ltdf2
; CHECK-EABI-LABEL: f7:
; CHECK-EABI: __aeabi_dcmplt
; CHECK-EABI: bne
-; CHECK-EABI: bne
+; CHECK-EABI: {{bne|beq}}
diff --git a/test/CodeGen/WebAssembly/indirect-import.ll b/test/CodeGen/WebAssembly/indirect-import.ll
index 1bde65bcbbba9..7cac31a2aef56 100644
--- a/test/CodeGen/WebAssembly/indirect-import.ll
+++ b/test/CodeGen/WebAssembly/indirect-import.ll
@@ -19,9 +19,9 @@ entry:
%vs = alloca void (%struct.big*)*, align 4
%s = alloca void (%struct.big*)*, align 4
-; CHECK: i32.const {{.+}}=, extern_fd@FUNCTION
+; CHECK-DAG: i32.const {{.+}}=, extern_fd@FUNCTION
+; CHECK-DAG: i32.const {{.+}}=, extern_vj@FUNCTION
store float (double)* @extern_fd, float (double)** %fd, align 4
-; CHECK: i32.const {{.+}}=, extern_vj@FUNCTION
store void (i64)* @extern_vj, void (i64)** %vj, align 4
%0 = load void (i64)*, void (i64)** %vj, align 4
call void %0(i64 1)
@@ -36,10 +36,9 @@ entry:
%2 = load i32 (i64, i32, double, float)*, i32 (i64, i32, double, float)** %ijidf, align 4
%call = call i32 %2(i64 1, i32 2, double 3.000000e+00, float 4.000000e+00)
-; CHECK: i32.const {{.+}}=, extern_struct@FUNCTION
+; CHECK-DAG: i32.const {{.+}}=, extern_struct@FUNCTION
+; CHECK-DAG: i32.const {{.+}}=, extern_sret@FUNCTION
store void (%struct.big*)* @extern_struct, void (%struct.big*)** %vs, align 4
-
-; CHECK: i32.const {{.+}}=, extern_sret@FUNCTION
store void (%struct.big*)* @extern_sret, void (%struct.big*)** %s, align 4
%3 = load float (double)*, float (double)** %fd, align 4
%4 = ptrtoint float (double)* %3 to i32
diff --git a/test/CodeGen/WebAssembly/userstack.ll b/test/CodeGen/WebAssembly/userstack.ll
index c160b391f6e81..2580771eb2cff 100644
--- a/test/CodeGen/WebAssembly/userstack.ll
+++ b/test/CodeGen/WebAssembly/userstack.ll
@@ -36,13 +36,13 @@ define void @alloca3264() {
; CHECK-NEXT: tee_local $push[[L5:.+]]=, [[SP:.+]], $pop[[L6]]
%r1 = alloca i32
%r2 = alloca double
- ; CHECK-NEXT: i32.const $push[[L0:.+]]=, 0
- ; CHECK-NEXT: i32.store 12($pop[[L5]]), $pop[[L0]]
store i32 0, i32* %r1
- ; CHECK-NEXT: get_local $push[[L2:.+]]=, [[SP]]{{$}}
- ; CHECK-NEXT: i64.const $push[[L1:.+]]=, 0
- ; CHECK-NEXT: i64.store 0($pop[[L2]]), $pop[[L1]]
store double 0.0, double* %r2
+ ; CHECK-NEXT: i64.const $push[[L1:.+]]=, 0
+ ; CHECK-NEXT: i64.store 0($pop[[L5]]), $pop[[L1]]
+ ; CHECK-NEXT: get_local $push[[L2:.+]]=, [[SP]]{{$}}
+ ; CHECK-NEXT: i32.const $push[[L0:.+]]=, 0
+ ; CHECK-NEXT: i32.store 12($pop[[L2]]), $pop[[L0]]
; CHECK-NEXT: return
ret void
}
diff --git a/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll b/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
index 7da85d3a9a1d0..fa71bffaf8c64 100644
--- a/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
+++ b/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+cmov | FileCheck %s
+; RUN: llc < %s -march=x86 -mattr=+cmov -x86-cmov-converter=false | FileCheck %s
;
; Test scheduling a multi-use compare. We should neither spill flags
; nor clone the compare.
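
For context: a multi-use compare is a single flag-producing comparison with several consumers, roughly the shape below (illustrative IR, not quoted from the test). The new -x86-cmov-converter=false flag presumably pins the output to cmov form so these scheduling checks keep matching now that a CMOV-to-branch conversion pass exists; the atomic-minmax changes elsewhere in this patch show what that pass does when left on.

define i32 @multi_use_cmp(i32 %a, i32 %b, i32 %x, i32 %y) {
  %c = icmp slt i32 %a, %b             ; one comparison, flags computed once
  %s1 = select i1 %c, i32 %x, i32 %y   ; first consumer
  %s2 = select i1 %c, i32 %b, i32 %a   ; second consumer
  %r = add i32 %s1, %s2
  ret i32 %r
}
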
diff --git a/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll b/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
index 8d387136da9c8..37f01845db799 100644
--- a/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
+++ b/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
; CHECK-NOT: -{{[1-9][0-9]*}}(%rsp)
-define x86_64_win64cc x86_fp80 @a(i64 %x) nounwind readnone {
+define win64cc x86_fp80 @a(i64 %x) nounwind readnone {
entry:
%conv = sitofp i64 %x to x86_fp80 ; <x86_fp80> [#uses=1]
ret x86_fp80 %conv
diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index ba5de8eb5fcb7..e812cbe3270ad 100644
--- a/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -83,10 +83,11 @@ define void @full_test() {
; X32-NEXT: cmpeqps %xmm2, %xmm1
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: blendvps %xmm0, %xmm2, %xmm4
-; X32-NEXT: extractps $1, %xmm4, {{[0-9]+}}(%esp)
; X32-NEXT: movss %xmm4, {{[0-9]+}}(%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movshdup {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; X32-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movss %xmm4, {{[0-9]+}}(%esp)
+; X32-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT: addl $60, %esp
; X32-NEXT: retl
;
diff --git a/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll b/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll
index 9dff4e596caa3..72807922a22b1 100644
--- a/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll
+++ b/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck --check-prefix=CHECK %s
+; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s
declare x86_regcallcc i32 @callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0);
diff --git a/test/CodeGen/X86/alias-static-alloca.ll b/test/CodeGen/X86/alias-static-alloca.ll
new file mode 100644
index 0000000000000..f4ca7e39f4fcb
--- /dev/null
+++ b/test/CodeGen/X86/alias-static-alloca.ll
@@ -0,0 +1,37 @@
+; RUN: llc -o - -mtriple=x86_64-linux-gnu %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; We should be able to forward the stored values directly to their
+; corresponding loads here.
+
+; CHECK-LABEL: foo
+; CHECK-DAG: movl %esi, -8(%rsp)
+; CHECK-DAG: movl %ecx, -16(%rsp)
+; CHECK-DAG: movl %edi, -4(%rsp)
+; CHECK-DAG: movl %edx, -12(%rsp)
+; CHECK: leal
+; CHECK: addl
+; CHECK: addl
+; CHECK: retq
+
+define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) {
+entry:
+ %a0 = alloca i32
+ %a1 = alloca i32
+ %a2 = alloca i32
+ %a3 = alloca i32
+ store i32 %b, i32* %a1
+ store i32 %d, i32* %a3
+ store i32 %a, i32* %a0
+ store i32 %c, i32* %a2
+ %l0 = load i32, i32* %a0
+ %l1 = load i32, i32* %a1
+ %l2 = load i32, i32* %a2
+ %l3 = load i32, i32* %a3
+ %add0 = add nsw i32 %l0, %l1
+ %add1 = add nsw i32 %add0, %l2
+ %add2 = add nsw i32 %add1, %l3
+ ret i32 %add2
+}
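
A minimal sketch of the endpoint the checks describe: once each load is proven to read exactly its matching alloca, the arithmetic no longer depends on the loads at all and reduces to the form below (illustrative only; the stores to the stack slots still appear in the checked assembly):

define i32 @foo_forwarded(i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
  %add0 = add nsw i32 %a, %b
  %add1 = add nsw i32 %add0, %c
  %add2 = add nsw i32 %add1, %d
  ret i32 %add2
}
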
diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll
index d5d3fa6db5e83..1a6fde371f09c 100644
--- a/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -9,32 +9,32 @@ define void @atomic_maxmin_i6432() {
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
-; LINUX: cmovne
-; LINUX: cmovne
+; LINUX: jne
+; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
%2 = atomicrmw min i64* @sc64, i64 6 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
-; LINUX: cmovne
-; LINUX: cmovne
+; LINUX: jne
+; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
%3 = atomicrmw umax i64* @sc64, i64 7 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
-; LINUX: cmovne
-; LINUX: cmovne
+; LINUX: jne
+; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
%4 = atomicrmw umin i64* @sc64, i64 8 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
-; LINUX: cmovne
-; LINUX: cmovne
+; LINUX: jne
+; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
ret void
diff --git a/test/CodeGen/X86/atomic128.ll b/test/CodeGen/X86/atomic128.ll
index 77bbdec826a59..c6300708bcc1a 100644
--- a/test/CodeGen/X86/atomic128.ll
+++ b/test/CodeGen/X86/atomic128.ll
@@ -167,14 +167,24 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %rdx, %rcx
; CHECK-NEXT: setge %cl
; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: jne LBB5_3
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
-; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: LBB5_3: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: jne LBB5_5
+; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
-; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: LBB5_5: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB5_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -203,14 +213,24 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %r8, %rcx
; CHECK-NEXT: setge %cl
; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: jne LBB6_3
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
-; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: LBB6_3: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: jne LBB6_5
+; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
-; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: LBB6_5: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB6_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -239,14 +259,24 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %rdx, %rcx
; CHECK-NEXT: setae %cl
; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: jne LBB7_3
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
-; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: LBB7_3: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: jne LBB7_5
+; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
-; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: LBB7_5: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB7_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -275,14 +305,24 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %rdx, %rcx
; CHECK-NEXT: setb %cl
; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: jne LBB8_3
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
-; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: LBB8_3: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: jne LBB8_5
+; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
-; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: LBB8_5: ## %atomicrmw.start
+; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB8_1
-; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
diff --git a/test/CodeGen/X86/avx-schedule.ll b/test/CodeGen/X86/avx-schedule.ll
index a12a412fb94d6..953f3bdd06e87 100644
--- a/test/CodeGen/X86/avx-schedule.ll
+++ b/test/CodeGen/X86/avx-schedule.ll
@@ -27,9 +27,9 @@ define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; ZNVER1-LABEL: test_addpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fadd <4 x double> %1, %2
@@ -57,9 +57,9 @@ define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; ZNVER1-LABEL: test_addps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fadd <8 x float> %1, %2
@@ -87,9 +87,9 @@ define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
;
; ZNVER1-LABEL: test_addsubpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %1, <4 x double> %2)
@@ -118,9 +118,9 @@ define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float>
;
; ZNVER1-LABEL: test_addsubps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %1, <8 x float> %2)
@@ -152,10 +152,10 @@ define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
;
; ZNVER1-LABEL: test_andnotpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -193,10 +193,10 @@ define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float>
;
; ZNVER1-LABEL: test_andnotps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -234,10 +234,10 @@ define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; ZNVER1-LABEL: test_andpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = and <4 x i64> %1, %2
@@ -273,10 +273,10 @@ define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; ZNVER1-LABEL: test_andps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = and <4 x i64> %1, %2
@@ -313,9 +313,9 @@ define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x doubl
; ZNVER1-LABEL: test_blendpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fadd <4 x double> %a1, %1
@@ -345,8 +345,8 @@ define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *
; ZNVER1-LABEL: test_blendps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
-; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 14, i32 7>
@@ -374,9 +374,9 @@ define <4 x double> @test_blendvpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
;
; ZNVER1-LABEL: test_blendvpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
%2 = load <4 x double>, <4 x double> *%a3, align 32
%3 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %1, <4 x double> %2, <4 x double> %a2)
@@ -405,9 +405,9 @@ define <8 x float> @test_blendvps(<8 x float> %a0, <8 x float> %a1, <8 x float>
;
; ZNVER1-LABEL: test_blendvps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
%2 = load <8 x float>, <8 x float> *%a3, align 32
%3 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %1, <8 x float> %2, <8 x float> %a2)
@@ -433,8 +433,8 @@ define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
;
; ZNVER1-LABEL: test_broadcastf128:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 32
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
ret <8 x float> %2
@@ -458,8 +458,8 @@ define <4 x double> @test_broadcastsd_ymm(double *%a0) {
;
; ZNVER1-LABEL: test_broadcastsd_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load double, double *%a0, align 8
%2 = insertelement <4 x double> undef, double %1, i32 0
%3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> zeroinitializer
@@ -484,8 +484,8 @@ define <4 x float> @test_broadcastss(float *%a0) {
;
; ZNVER1-LABEL: test_broadcastss:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load float, float *%a0, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
@@ -510,8 +510,8 @@ define <8 x float> @test_broadcastss_ymm(float *%a0) {
;
; ZNVER1-LABEL: test_broadcastss_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load float, float *%a0, align 4
%2 = insertelement <8 x float> undef, float %1, i32 0
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer
@@ -543,9 +543,9 @@ define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; ZNVER1-LABEL: test_cmppd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fcmp oeq <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fcmp oeq <4 x double> %a0, %2
@@ -581,9 +581,9 @@ define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; ZNVER1-LABEL: test_cmpps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fcmp oeq <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fcmp oeq <8 x float> %a0, %2
@@ -618,10 +618,10 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
;
; ZNVER1-LABEL: test_cvtdq2pd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp <4 x i32> %a0 to <4 x double>
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = sitofp <4 x i32> %2 to <4 x double>
@@ -655,10 +655,10 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
;
; ZNVER1-LABEL: test_cvtdq2ps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp <8 x i32> %a0 to <8 x float>
%2 = load <8 x i32>, <8 x i32> *%a1, align 16
%3 = sitofp <8 x i32> %2 to <8 x float>
@@ -690,10 +690,10 @@ define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_cvtpd2dq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi <4 x double> %a0 to <4 x i32>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = fptosi <4 x double> %2 to <4 x i32>
@@ -725,10 +725,10 @@ define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_cvtpd2ps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptrunc <4 x double> %a0 to <4 x float>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = fptrunc <4 x double> %2 to <4 x float>
@@ -760,10 +760,10 @@ define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_cvtps2dq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi <8 x float> %a0 to <8 x i32>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = fptosi <8 x float> %2 to <8 x i32>
@@ -792,9 +792,9 @@ define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; ZNVER1-LABEL: test_divpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
-; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fdiv <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fdiv <4 x double> %1, %2
@@ -822,9 +822,9 @@ define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; ZNVER1-LABEL: test_divps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
-; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fdiv <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fdiv <8 x float> %1, %2
@@ -853,8 +853,8 @@ define <8 x float> @test_dpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2
; ZNVER1-LABEL: test_dpps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %1, <8 x float> %2, i8 7)
@@ -886,9 +886,9 @@ define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x floa
; ZNVER1-LABEL: test_extractf128:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
+; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
store <4 x float> %2, <4 x float> *%a2
@@ -916,9 +916,9 @@ define <4 x double> @test_haddpd(<4 x double> %a0, <4 x double> %a1, <4 x double
;
; ZNVER1-LABEL: test_haddpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %1, <4 x double> %2)
@@ -947,9 +947,9 @@ define <8 x float> @test_haddps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
;
; ZNVER1-LABEL: test_haddps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %1, <8 x float> %2)
@@ -978,9 +978,9 @@ define <4 x double> @test_hsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double
;
; ZNVER1-LABEL: test_hsubpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1009,9 +1009,9 @@ define <8 x float> @test_hsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
;
; ZNVER1-LABEL: test_hsubps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1044,9 +1044,9 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float
; ZNVER1-LABEL: test_insertf128:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
-; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
%3 = load <4 x float>, <4 x float> *%a2, align 16
@@ -1074,8 +1074,8 @@ define <32 x i8> @test_lddqu(i8* %a0) {
;
; ZNVER1-LABEL: test_lddqu:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0)
ret <32 x i8> %1
}
@@ -1108,7 +1108,7 @@ define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) {
; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1)
call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2)
ret <2 x double> %1
@@ -1143,7 +1143,7 @@ define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2
; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1)
call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2)
ret <4 x double> %1
@@ -1178,7 +1178,7 @@ define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) {
; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1)
call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x float> %a2)
ret <4 x float> %1
@@ -1213,7 +1213,7 @@ define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2)
; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1)
call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2)
ret <8 x float> %1
@@ -1243,8 +1243,8 @@ define <4 x double> @test_maxpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; ZNVER1-LABEL: test_maxpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1274,8 +1274,8 @@ define <8 x float> @test_maxps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; ZNVER1-LABEL: test_maxps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1305,8 +1305,8 @@ define <4 x double> @test_minpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
; ZNVER1-LABEL: test_minpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1336,8 +1336,8 @@ define <8 x float> @test_minps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
; ZNVER1-LABEL: test_minps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1369,10 +1369,10 @@ define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_movapd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <4 x double>, <4 x double> *%a0, align 32
%2 = fadd <4 x double> %1, %1
store <4 x double> %2, <4 x double> *%a1, align 32
@@ -1403,10 +1403,10 @@ define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_movaps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <8 x float>, <8 x float> *%a0, align 32
%2 = fadd <8 x float> %1, %1
store <8 x float> %2, <8 x float> *%a1, align 32
@@ -1437,10 +1437,10 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_movddup:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
+; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50]
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -1468,9 +1468,9 @@ define i32 @test_movmskpd(<4 x double> %a0) {
;
; ZNVER1-LABEL: test_movmskpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.50]
+; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
ret i32 %1
}
@@ -1496,9 +1496,9 @@ define i32 @test_movmskps(<8 x float> %a0) {
;
; ZNVER1-LABEL: test_movmskps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.50]
+; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
ret i32 %1
}
@@ -1525,9 +1525,9 @@ define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_movntpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <4 x double> %a0, %a0
store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0
ret <4 x double> %1
@@ -1554,9 +1554,9 @@ define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_movntps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <8 x float> %a0, %a0
store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0
ret <8 x float> %1
@@ -1586,10 +1586,10 @@ define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_movshdup:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
+; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50]
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -1621,10 +1621,10 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_movsldup:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
+; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50]
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -1658,10 +1658,10 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_movupd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <4 x double>, <4 x double> *%a0, align 1
%2 = fadd <4 x double> %1, %1
store <4 x double> %2, <4 x double> *%a1, align 1
@@ -1694,10 +1694,10 @@ define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_movups:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <8 x float>, <8 x float> *%a0, align 1
%2 = fadd <8 x float> %1, %1
store <8 x float> %2, <8 x float> *%a1, align 1
@@ -1725,9 +1725,9 @@ define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; ZNVER1-LABEL: test_mulpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00]
-; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fmul <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fmul <4 x double> %1, %2
@@ -1755,9 +1755,9 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; ZNVER1-LABEL: test_mulps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
-; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fmul <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fmul <8 x float> %1, %2
@@ -1788,10 +1788,10 @@ define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2)
;
; ZNVER1-LABEL: orpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = or <4 x i64> %1, %2
@@ -1827,10 +1827,10 @@ define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2
;
; ZNVER1-LABEL: test_orps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = or <4 x i64> %1, %2
@@ -1866,10 +1866,10 @@ define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
;
; ZNVER1-LABEL: test_permilpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00]
+; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -1901,10 +1901,10 @@ define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_permilpd_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
+; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50]
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
@@ -1936,10 +1936,10 @@ define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
;
; ZNVER1-LABEL: test_permilps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00]
+; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -1971,10 +1971,10 @@ define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_permilps_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
+; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50]
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -2004,8 +2004,8 @@ define <2 x double> @test_permilvarpd(<2 x double> %a0, <2 x i64> %a1, <2 x i64>
; ZNVER1-LABEL: test_permilvarpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> %2)
@@ -2035,8 +2035,8 @@ define <4 x double> @test_permilvarpd_ymm(<4 x double> %a0, <4 x i64> %a1, <4 x
; ZNVER1-LABEL: test_permilvarpd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> %2)
@@ -2066,8 +2066,8 @@ define <4 x float> @test_permilvarps(<4 x float> %a0, <4 x i32> %a1, <4 x i32> *
; ZNVER1-LABEL: test_permilvarps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> %2)
@@ -2097,8 +2097,8 @@ define <8 x float> @test_permilvarps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i3
; ZNVER1-LABEL: test_permilvarps_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> %2)
@@ -2130,10 +2130,10 @@ define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_rcpps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:2.00]
-; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %2)
@@ -2166,10 +2166,10 @@ define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_roundpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00]
+; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %2, i32 7)
@@ -2202,10 +2202,10 @@ define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_roundps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00]
+; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %2, i32 7)
@@ -2238,10 +2238,10 @@ define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_rsqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
-; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %2)
@@ -2275,9 +2275,9 @@ define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double
; ZNVER1-LABEL: test_shufpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
-; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
@@ -2307,8 +2307,8 @@ define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
; ZNVER1-LABEL: test_shufps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
-; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 3, i32 8, i32 8, i32 4, i32 7, i32 12, i32 12>
@@ -2339,10 +2339,10 @@ define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) {
;
; ZNVER1-LABEL: test_sqrtpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
-; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %2)
@@ -2375,10 +2375,10 @@ define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) {
;
; ZNVER1-LABEL: test_sqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
-; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %2)
@@ -2408,9 +2408,9 @@ define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; ZNVER1-LABEL: test_subpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fsub <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fsub <4 x double> %1, %2
@@ -2438,9 +2438,9 @@ define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; ZNVER1-LABEL: test_subps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fsub <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fsub <8 x float> %1, %2
@@ -2477,12 +2477,12 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
;
; ZNVER1-LABEL: test_testpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [6:1.00]
-; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: setb %al # sched: [1:0.25]
+; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %2)
@@ -2523,13 +2523,13 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
;
; ZNVER1-LABEL: test_testpd_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
+; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: setb %al # sched: [1:0.25]
+; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %2)
@@ -2568,12 +2568,12 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
;
; ZNVER1-LABEL: test_testps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [6:1.00]
-; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: setb %al # sched: [1:0.25]
+; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %2)
@@ -2614,13 +2614,13 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
;
; ZNVER1-LABEL: test_testps_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
+; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: setb %al # sched: [1:0.25]
+; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %2)
@@ -2654,9 +2654,9 @@ define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; ZNVER1-LABEL: test_unpckhpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
-; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -2686,8 +2686,8 @@ define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float>
; ZNVER1-LABEL: test_unpckhps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
-; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -2719,9 +2719,9 @@ define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
; ZNVER1-LABEL: test_unpcklpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
-; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -2751,8 +2751,8 @@ define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float>
; ZNVER1-LABEL: test_unpcklps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
-; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -2783,10 +2783,10 @@ define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
;
; ZNVER1-LABEL: test_xorpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, %2
@@ -2822,10 +2822,10 @@ define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
;
; ZNVER1-LABEL: test_xorps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, %2
@@ -2856,7 +2856,7 @@ define void @test_zeroall() {
; ZNVER1-LABEL: test_zeroall:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vzeroall # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.x86.avx.vzeroall()
ret void
}
@@ -2881,7 +2881,7 @@ define void @test_zeroupper() {
; ZNVER1-LABEL: test_zeroupper:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.x86.avx.vzeroupper()
ret void
}
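For reference, the "sched: [N:M.MM]" comments emitted by -print-schedule encode the scheduling model's latency (N cycles) and reciprocal throughput (M.MM cycles per instruction). The znver1 changes in this patch therefore adjust modeled timings only; the generated instruction sequences are unchanged. For example, from the checks above:

  ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]

now models ymm vaddpd on znver1 as 3 cycles of latency with one such add sustained per cycle, where the old model claimed [3:2.00].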
diff --git a/test/CodeGen/X86/avx2-arith.ll b/test/CodeGen/X86/avx2-arith.ll
index 017f54b40b2d5..9918d66802564 100644
--- a/test/CodeGen/X86/avx2-arith.ll
+++ b/test/CodeGen/X86/avx2-arith.ll
@@ -386,13 +386,13 @@ define <8 x i32> @mul_const9(<8 x i32> %x) {
define <4 x i32> @mul_const10(<4 x i32> %x) {
; X32-LABEL: mul_const10:
; X32: # BB#0:
-; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const10:
; X64: # BB#0:
-; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%m = mul <4 x i32> %x, <i32 16843009, i32 16843009, i32 16843009, i32 16843009>
@@ -403,13 +403,13 @@ define <4 x i32> @mul_const10(<4 x i32> %x) {
define <4 x i32> @mul_const11(<4 x i32> %x) {
; X32-LABEL: mul_const11:
; X32: # BB#0:
-; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const11:
; X64: # BB#0:
-; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%m = mul <4 x i32> %x, <i32 2155905152, i32 2155905152, i32 2155905152, i32 2155905152>
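The vpbroadcastd check updates here and in the later avx2-vector-shifts/avx512-cvt hunks track a change in how utils/update_llc_test_checks.py appears to print broadcast loads: instead of matching the constant-pool address, the checks now decode and print the splatted constant, e.g.

  -; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
  +; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]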
diff --git a/test/CodeGen/X86/avx2-schedule.ll b/test/CodeGen/X86/avx2-schedule.ll
index 042bc217b97cf..a3862d7e27c66 100644
--- a/test/CodeGen/X86/avx2-schedule.ll
+++ b/test/CodeGen/X86/avx2-schedule.ll
@@ -13,10 +13,10 @@ define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) {
;
; ZNVER1-LABEL: test_pabsb:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [6:1.00]
-; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
%2 = load <32 x i8>, <32 x i8> *%a1, align 32
%3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2)
@@ -35,10 +35,10 @@ define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) {
;
; ZNVER1-LABEL: test_pabsd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [6:1.00]
-; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
%2 = load <8 x i32>, <8 x i32> *%a1, align 32
%3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2)
@@ -57,10 +57,10 @@ define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) {
;
; ZNVER1-LABEL: test_pabsw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [6:1.00]
-; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
%2 = load <16 x i16>, <16 x i16> *%a1, align 32
%3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2)
@@ -78,9 +78,9 @@ define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
;
; ZNVER1-LABEL: test_paddb:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <32 x i8> %a0, %a1
%2 = load <32 x i8>, <32 x i8> *%a2, align 32
%3 = add <32 x i8> %1, %2
@@ -96,9 +96,9 @@ define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
;
; ZNVER1-LABEL: test_paddd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = add <8 x i32> %1, %2
@@ -114,9 +114,9 @@ define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
;
; ZNVER1-LABEL: test_paddq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = add <4 x i64> %1, %2
@@ -132,9 +132,9 @@ define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
;
; ZNVER1-LABEL: test_paddw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = add <16 x i16> %1, %2
@@ -151,10 +151,10 @@ define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
;
; ZNVER1-LABEL: test_pand:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = and <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = and <4 x i64> %1, %2
@@ -172,10 +172,10 @@ define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
;
; ZNVER1-LABEL: test_pandn:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [6:1.00]
-; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
%2 = and <4 x i64> %a1, %1
%3 = load <4 x i64>, <4 x i64> *%a2, align 32
@@ -194,9 +194,9 @@ define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
;
; ZNVER1-LABEL: test_pmulld:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = mul <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = mul <8 x i32> %1, %2
@@ -212,9 +212,9 @@ define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
;
; ZNVER1-LABEL: test_pmullw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = mul <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = mul <16 x i16> %1, %2
@@ -231,10 +231,10 @@ define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
;
; ZNVER1-LABEL: test_por:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = or <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = or <4 x i64> %1, %2
@@ -251,9 +251,9 @@ define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
;
; ZNVER1-LABEL: test_psubb:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <32 x i8> %a0, %a1
%2 = load <32 x i8>, <32 x i8> *%a2, align 32
%3 = sub <32 x i8> %1, %2
@@ -269,9 +269,9 @@ define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
;
; ZNVER1-LABEL: test_psubd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = sub <8 x i32> %1, %2
@@ -287,9 +287,9 @@ define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
;
; ZNVER1-LABEL: test_psubq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = sub <4 x i64> %1, %2
@@ -305,9 +305,9 @@ define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
;
; ZNVER1-LABEL: test_psubw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = sub <16 x i16> %1, %2
@@ -324,10 +324,10 @@ define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
;
; ZNVER1-LABEL: test_pxor:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [4:1.00]
+; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = xor <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = xor <4 x i64> %1, %2
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 127726ea30da1..c77714b9e181a 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -376,7 +376,7 @@ define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X32: # BB#0:
; X32-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm2
+; X32-NEXT: vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8]
; X32-NEXT: vpand %xmm2, %xmm1, %xmm1
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
@@ -386,7 +386,7 @@ define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X64: # BB#0:
; X64-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; X64-NEXT: vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8]
; X64-NEXT: vpand %xmm2, %xmm1, %xmm1
; X64-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 140299f5495dc..e10a781fabc21 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -1507,7 +1507,7 @@ define <4 x float> @uitofp_4i1_float(<4 x i32> %a) {
; NOVL: # BB#0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
-; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; NOVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; NOVL-NEXT: vpand %xmm1, %xmm0, %xmm0
; NOVL-NEXT: retq
;
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index e1a92c60d1825..6f4bf061a2157 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -1630,8 +1630,9 @@ define void @f1(i32 %c) {
; CHECK-LABEL: f1:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movzbl {{.*}}(%rip), %edi
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: movb {{.*}}(%rip), %al
+; CHECK-NEXT: notb %al
+; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: movb %al, {{.*}}(%rip)
; CHECK-NEXT: xorl $1, %edi
; CHECK-NEXT: jmp _f2 ## TAILCALL
diff --git a/test/CodeGen/X86/avx512-rotate.ll b/test/CodeGen/X86/avx512-rotate.ll
new file mode 100644
index 0000000000000..98fa67ad793d9
--- /dev/null
+++ b/test/CodeGen/X86/avx512-rotate.ll
@@ -0,0 +1,256 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=KNL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
+
+declare <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+; Tests showing replacement of variable rotates with immediate splat versions.
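+; Each call below passes a splat-of-5 rotate amount, so the expected lowering
+; is a single immediate-form rotate (e.g. "vprold $5") rather than the
+; variable-amount "vprolvd"/"vprolvq" form.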
+
+define <16 x i32> @test_splat_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
+; KNL-LABEL: test_splat_rol_v16i32:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprold $5, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprold $5, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprold $5, %zmm0, %zmm0
+; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_rol_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprold $5, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprold $5, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprold $5, %zmm0, %zmm0
+; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> %x1, i16 %x2)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> zeroinitializer, i16 %x2)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> %x1, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
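+; Note that the three calls above also cover the three masking modes of the
+; intrinsic: merge-masking into %x1 ("{%k1}"), zero-masking ("{%k1} {z}"), and
+; an all-ones (unmasked) mask. The remaining tests follow the same pattern.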
+
+define <8 x i64> @test_splat_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
+; KNL-LABEL: test_splat_rol_v8i64:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprolq $5, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprolq $5, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprolq $5, %zmm0, %zmm0
+; KNL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_rol_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprolq $5, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprolq $5, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprolq $5, %zmm0, %zmm0
+; SKX-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> %x1, i8 %x2)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> zeroinitializer, i8 %x2)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> %x1, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+define <16 x i32> @test_splat_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
+; KNL-LABEL: test_splat_ror_v16i32:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprord $5, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprord $5, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprord $5, %zmm0, %zmm0
+; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_ror_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprord $5, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprord $5, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprord $5, %zmm0, %zmm0
+; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> %x1, i16 %x2)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> zeroinitializer, i16 %x2)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> %x1, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+define <8 x i64> @test_splat_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
+; KNL-LABEL: test_splat_ror_v8i64:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprorq $5, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprorq $5, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprorq $5, %zmm0, %zmm0
+; KNL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_ror_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprorq $5, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprorq $5, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprorq $5, %zmm0, %zmm0
+; SKX-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> %x1, i8 %x2)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> zeroinitializer, i8 %x2)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> %x1, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+; Tests showing replacement of out-of-bounds variable rotates with in-bounds immediate splat versions.
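+; Rotate amounts are reduced modulo the element width, so e.g. for i32
+; elements an amount of 33 lowers to "$1", 65534 lowers to "$30"
+; (65534 % 32 == 30), and -1 (all ones) lowers to "$31".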
+
+define <16 x i32> @test_splat_bounds_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
+; KNL-LABEL: test_splat_bounds_rol_v16i32:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprold $1, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprold $31, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprold $30, %zmm0, %zmm0
+; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_bounds_rol_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprold $1, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprold $31, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprold $30, %zmm0, %zmm0
+; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33>, <16 x i32> %x1, i16 %x2)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> zeroinitializer, i16 %x2)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534>, <16 x i32> %x1, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+define <8 x i64> @test_splat_bounds_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
+; KNL-LABEL: test_splat_bounds_rol_v8i64:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprolq $62, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprolq $1, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprolq $63, %zmm0, %zmm0
+; KNL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_bounds_rol_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprolq $62, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprolq $1, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprolq $63, %zmm0, %zmm0
+; SKX-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 65534, i64 65534, i64 65534, i64 65534, i64 65534, i64 65534, i64 65534, i64 65534>, <8 x i64> %x1, i8 %x2)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 65, i64 65, i64 65, i64 65, i64 65, i64 65, i64 65, i64 65>, <8 x i64> zeroinitializer, i8 %x2)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> %x1, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+define <16 x i32> @test_splat_bounds_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
+; KNL-LABEL: test_splat_bounds_ror_v16i32:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprord $1, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprord $31, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprord $30, %zmm0, %zmm0
+; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_bounds_ror_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprord $1, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprord $31, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprord $30, %zmm0, %zmm0
+; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33, i32 33>, <16 x i32> %x1, i16 %x2)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> zeroinitializer, i16 %x2)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534, i32 65534>, <16 x i32> %x1, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+define <8 x i64> @test_splat_bounds_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
+; KNL-LABEL: test_splat_bounds_ror_v8i64:
+; KNL: # BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vprorq $62, %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vprorq $1, %zmm0, %zmm2 {%k1} {z}
+; KNL-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vprorq $63, %zmm0, %zmm0
+; KNL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_splat_bounds_ror_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vprorq $62, %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vprorq $1, %zmm0, %zmm2 {%k1} {z}
+; SKX-NEXT: vpaddq %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vprorq $63, %zmm0, %zmm0
+; SKX-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 65534, i64 65534, i64 65534, i64 65534, i64 65534, i64 65534, i64 65534, i64 65534>, <8 x i64> %x1, i8 %x2)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 65, i64 65, i64 65, i64 65, i64 65, i64 65, i64 65, i64 65>, <8 x i64> zeroinitializer, i8 %x2)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> %x1, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+; Constant folding
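+; With constant inputs the rotates fold away entirely; e.g. in the first test
+; rol(1, 63) == 0x8000000000000000 (9223372036854775808), and since the amount
+; is taken modulo 64, an amount of 65 behaves like 1 and 65534 like 62.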
+
+define <8 x i64> @test_fold_rol_v8i64() {
+; CHECK-LABEL: test_fold_rol_v8i64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [1,2,4,9223372036854775808,2,4611686018427387904,9223372036854775808,9223372036854775808]
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, <8 x i64> <i64 0, i64 1, i64 2, i64 63, i64 65, i64 65534, i64 65535, i64 -1>, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_fold_ror_v8i64() {
+; CHECK-LABEL: test_fold_ror_v8i64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [1,9223372036854775808,4611686018427387904,2,9223372036854775808,4,2,2]
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, <8 x i64> <i64 0, i64 1, i64 2, i64 63, i64 65, i64 65534, i64 65535, i64 -1>, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
diff --git a/test/CodeGen/X86/avx512-shift.ll b/test/CodeGen/X86/avx512-shift.ll
index 10883a5a9a625..ce2b010ec0f27 100644
--- a/test/CodeGen/X86/avx512-shift.ll
+++ b/test/CodeGen/X86/avx512-shift.ll
@@ -1,136 +1,178 @@
-;RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
-;RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=KNL
+;RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
-;CHECK-LABEL: shift_16_i32
-;CHECK: vpsrld
-;CHECK: vpslld
-;CHECK: vpsrad
-;CHECK: ret
define <16 x i32> @shift_16_i32(<16 x i32> %a) {
+; CHECK-LABEL: shift_16_i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrld $1, %zmm0, %zmm0
+; CHECK-NEXT: vpslld $12, %zmm0, %zmm0
+; CHECK-NEXT: vpsrad $12, %zmm0, %zmm0
+; CHECK-NEXT: retq
%b = lshr <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%c = shl <16 x i32> %b, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
%d = ashr <16 x i32> %c, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
ret <16 x i32> %d;
}
-;CHECK-LABEL: shift_8_i64
-;CHECK: vpsrlq
-;CHECK: vpsllq
-;CHECK: vpsraq
-;CHECK: ret
define <8 x i64> @shift_8_i64(<8 x i64> %a) {
+; CHECK-LABEL: shift_8_i64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrlq $1, %zmm0, %zmm0
+; CHECK-NEXT: vpsllq $12, %zmm0, %zmm0
+; CHECK-NEXT: vpsraq $12, %zmm0, %zmm0
+; CHECK-NEXT: retq
%b = lshr <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
%c = shl <8 x i64> %b, <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
%d = ashr <8 x i64> %c, <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
ret <8 x i64> %d;
}
-;SKX-LABEL: shift_4_i64
-;SKX: vpsrlq
-;SKX: vpsllq
-;SKX: vpsraq
-;SKX: ret
define <4 x i64> @shift_4_i64(<4 x i64> %a) {
+; KNL-LABEL: shift_4_i64:
+; KNL: # BB#0:
+; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
+; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
+; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
+; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: shift_4_i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpsrlq $1, %ymm0, %ymm0
+; SKX-NEXT: vpsllq $12, %ymm0, %ymm0
+; SKX-NEXT: vpsraq $12, %ymm0, %ymm0
+; SKX-NEXT: retq
%b = lshr <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
%c = shl <4 x i64> %b, <i64 12, i64 12, i64 12, i64 12>
%d = ashr <4 x i64> %c, <i64 12, i64 12, i64 12, i64 12>
ret <4 x i64> %d;
}
-; CHECK-LABEL: variable_shl4
-; CHECK: vpsllvq %zmm
-; CHECK: ret
define <8 x i64> @variable_shl4(<8 x i64> %x, <8 x i64> %y) {
+; CHECK-LABEL: variable_shl4:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
%k = shl <8 x i64> %x, %y
ret <8 x i64> %k
}
-; CHECK-LABEL: variable_shl5
-; CHECK: vpsllvd %zmm
-; CHECK: ret
define <16 x i32> @variable_shl5(<16 x i32> %x, <16 x i32> %y) {
+; CHECK-LABEL: variable_shl5:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
%k = shl <16 x i32> %x, %y
ret <16 x i32> %k
}
-; CHECK-LABEL: variable_srl0
-; CHECK: vpsrlvd
-; CHECK: ret
define <16 x i32> @variable_srl0(<16 x i32> %x, <16 x i32> %y) {
+; CHECK-LABEL: variable_srl0:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
%k = lshr <16 x i32> %x, %y
ret <16 x i32> %k
}
-; CHECK-LABEL: variable_srl2
-; CHECK: psrlvq
-; CHECK: ret
define <8 x i64> @variable_srl2(<8 x i64> %x, <8 x i64> %y) {
+; CHECK-LABEL: variable_srl2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
%k = lshr <8 x i64> %x, %y
ret <8 x i64> %k
}
-; CHECK-LABEL: variable_sra1
-; CHECK: vpsravd
-; CHECK: ret
define <16 x i32> @variable_sra1(<16 x i32> %x, <16 x i32> %y) {
+; CHECK-LABEL: variable_sra1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
%k = ashr <16 x i32> %x, %y
ret <16 x i32> %k
}
-; CHECK-LABEL: variable_sra2
-; CHECK: vpsravq %zmm
-; CHECK: ret
define <8 x i64> @variable_sra2(<8 x i64> %x, <8 x i64> %y) {
+; CHECK-LABEL: variable_sra2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
%k = ashr <8 x i64> %x, %y
ret <8 x i64> %k
}
-; SKX-LABEL: variable_sra3
-; SKX: vpsravq %ymm
-; SKX: ret
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
+; KNL-LABEL: variable_sra3:
+; KNL: # BB#0:
+; KNL-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
+; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: variable_sra3:
+; SKX: # BB#0:
+; SKX-NEXT: vpsravq %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
%k = ashr <4 x i64> %x, %y
ret <4 x i64> %k
}
-; SKX-LABEL: variable_sra4
-; SKX: vpsravw %xmm
-; SKX: ret
define <8 x i16> @variable_sra4(<8 x i16> %x, <8 x i16> %y) {
+; KNL-LABEL: variable_sra4:
+; KNL: # BB#0:
+; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
+; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; KNL-NEXT: vpmovdw %zmm0, %ymm0
+; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: variable_sra4:
+; SKX: # BB#0:
+; SKX-NEXT: vpsravw %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
%k = ashr <8 x i16> %x, %y
ret <8 x i16> %k
}
-; CHECK-LABEL: variable_sra01_load
-; CHECK: vpsravd (%
-; CHECK: ret
define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
+; CHECK-LABEL: variable_sra01_load:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsravd (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
%y1 = load <16 x i32>, <16 x i32>* %y
%k = ashr <16 x i32> %x, %y1
ret <16 x i32> %k
}
-; CHECK-LABEL: variable_shl1_load
-; CHECK: vpsllvd (%
-; CHECK: ret
define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
+; CHECK-LABEL: variable_shl1_load:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsllvd (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
%y1 = load <16 x i32>, <16 x i32>* %y
%k = shl <16 x i32> %x, %y1
ret <16 x i32> %k
}
-; CHECK: variable_srl0_load
-; CHECK: vpsrlvd (%
-; CHECK: ret
+
define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
+; CHECK-LABEL: variable_srl0_load:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrlvd (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
%y1 = load <16 x i32>, <16 x i32>* %y
%k = lshr <16 x i32> %x, %y1
ret <16 x i32> %k
}
-; CHECK: variable_srl3_load
-; CHECK: vpsrlvq (%
-; CHECK: ret
define <8 x i64> @variable_srl3_load(<8 x i64> %x, <8 x i64>* %y) {
+; CHECK-LABEL: variable_srl3_load:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrlvq (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
%y1 = load <8 x i64>, <8 x i64>* %y
%k = lshr <8 x i64> %x, %y1
ret <8 x i64> %k
diff --git a/test/CodeGen/X86/bmi-schedule.ll b/test/CodeGen/X86/bmi-schedule.ll
new file mode 100644
index 0000000000000..75be2d9c0f01e
--- /dev/null
+++ b/test/CodeGen/X86/bmi-schedule.ll
@@ -0,0 +1,529 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+bmi | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
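+; ANDN computes ~a & b in one instruction. Each test below applies the
+; xor-with-minus-one/and pattern to a register operand and to a loaded
+; operand, so register and memory operands are both covered.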
+define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
+; GENERIC-LABEL: test_andn_i16:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: andnl %esi, %edi, %eax
+; GENERIC-NEXT: notl %edi
+; GENERIC-NEXT: andw (%rdx), %di
+; GENERIC-NEXT: addl %edi, %eax
+; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_andn_i16:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: andnl %esi, %edi, %eax # sched: [1:0.50]
+; HASWELL-NEXT: notl %edi # sched: [1:0.25]
+; HASWELL-NEXT: andw (%rdx), %di # sched: [5:0.50]
+; HASWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
+; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_andn_i16:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: andnl %esi, %edi, %eax # sched: [1:0.50]
+; BTVER2-NEXT: notl %edi # sched: [1:0.50]
+; BTVER2-NEXT: andw (%rdx), %di # sched: [4:1.00]
+; BTVER2-NEXT: addl %edi, %eax # sched: [1:0.50]
+; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andn_i16:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: andnl %esi, %edi, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: notl %edi # sched: [1:0.25]
+; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
+; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i16, i16 *%a2
+ %2 = xor i16 %a0, -1
+ %3 = and i16 %2, %a1
+ %4 = and i16 %2, %1
+ %5 = add i16 %3, %4
+ ret i16 %5
+}
+
+define i32 @test_andn_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_andn_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: andnl %esi, %edi, %ecx
+; GENERIC-NEXT: andnl (%rdx), %edi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_andn_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.50]
+; HASWELL-NEXT: andnl (%rdx), %edi, %eax # sched: [4:0.50]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_andn_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: andnl (%rdx), %edi, %eax # sched: [4:1.00]
+; BTVER2-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.50]
+; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andn_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: andnl (%rdx), %edi, %eax # sched: [5:0.50]
+; ZNVER1-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.25]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a2
+ %2 = xor i32 %a0, -1
+ %3 = and i32 %2, %a1
+ %4 = and i32 %2, %1
+ %5 = add i32 %3, %4
+ ret i32 %5
+}
+
+define i64 @test_andn_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_andn_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: andnq %rsi, %rdi, %rcx
+; GENERIC-NEXT: andnq (%rdx), %rdi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_andn_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.50]
+; HASWELL-NEXT: andnq (%rdx), %rdi, %rax # sched: [4:0.50]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_andn_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: andnq (%rdx), %rdi, %rax # sched: [4:1.00]
+; BTVER2-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.50]
+; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andn_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: andnq (%rdx), %rdi, %rax # sched: [5:0.50]
+; ZNVER1-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.25]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a2
+ %2 = xor i64 %a0, -1
+ %3 = and i64 %2, %a1
+ %4 = and i64 %2, %1
+ %5 = add i64 %3, %4
+ ret i64 %5
+}
+
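+; BEXTR extracts a contiguous bit field; the start index and field length
+; are packed into the second intrinsic operand (%a0 here).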
+define i32 @test_bextr_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_bextr_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: bextrl %edi, (%rdx), %ecx
+; GENERIC-NEXT: bextrl %edi, %esi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_bextr_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: bextrl %edi, (%rdx), %ecx # sched: [6:0.50]
+; HASWELL-NEXT: bextrl %edi, %esi, %eax # sched: [2:0.50]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_bextr_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: bextrl %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: bextrl %edi, %esi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_bextr_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: bextrl %edi, %esi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a2
+ %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %1, i32 %a0)
+ %3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a1, i32 %a0)
+ %4 = add i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
+
+define i64 @test_bextr_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_bextr_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: bextrq %rdi, (%rdx), %rcx
+; GENERIC-NEXT: bextrq %rdi, %rsi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_bextr_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [6:0.50]
+; HASWELL-NEXT: bextrq %rdi, %rsi, %rax # sched: [2:0.50]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_bextr_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: bextrq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_bextr_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a2
+ %2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %1, i64 %a0)
+ %3 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a1, i64 %a0)
+ %4 = add i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.x86.bmi.bextr.64(i64, i64)
+
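+; BLSI isolates the lowest set bit: x & -x. The sub-from-zero feeds the
+; and below, which the backend folds into blsi{l,q}.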
+define i32 @test_blsi_i32(i32 %a0, i32 *%a1) {
+; GENERIC-LABEL: test_blsi_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: blsil (%rsi), %ecx
+; GENERIC-NEXT: blsil %edi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_blsi_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: blsil (%rsi), %ecx # sched: [4:0.50]
+; HASWELL-NEXT: blsil %edi, %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_blsi_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: blsil (%rsi), %ecx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: blsil %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blsi_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: blsil (%rsi), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: blsil %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a1
+ %2 = sub i32 0, %1
+ %3 = sub i32 0, %a0
+ %4 = and i32 %1, %2
+ %5 = and i32 %a0, %3
+ %6 = add i32 %4, %5
+ ret i32 %6
+}
+
+define i64 @test_blsi_i64(i64 %a0, i64 *%a1) {
+; GENERIC-LABEL: test_blsi_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: blsiq (%rsi), %rcx
+; GENERIC-NEXT: blsiq %rdi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_blsi_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: blsiq (%rsi), %rcx # sched: [4:0.50]
+; HASWELL-NEXT: blsiq %rdi, %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_blsi_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: blsiq (%rsi), %rcx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: blsiq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blsi_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: blsiq (%rsi), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: blsiq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a1
+ %2 = sub i64 0, %1
+ %3 = sub i64 0, %a0
+ %4 = and i64 %1, %2
+ %5 = and i64 %a0, %3
+ %6 = add i64 %4, %5
+ ret i64 %6
+}
+
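+; BLSMSK builds a mask up to and including the lowest set bit: x ^ (x - 1).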
+define i32 @test_blsmsk_i32(i32 %a0, i32 *%a1) {
+; GENERIC-LABEL: test_blsmsk_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: blsmskl (%rsi), %ecx
+; GENERIC-NEXT: blsmskl %edi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_blsmsk_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: blsmskl (%rsi), %ecx # sched: [4:0.50]
+; HASWELL-NEXT: blsmskl %edi, %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_blsmsk_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: blsmskl (%rsi), %ecx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: blsmskl %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blsmsk_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: blsmskl (%rsi), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: blsmskl %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a1
+ %2 = sub i32 %1, 1
+ %3 = sub i32 %a0, 1
+ %4 = xor i32 %1, %2
+ %5 = xor i32 %a0, %3
+ %6 = add i32 %4, %5
+ ret i32 %6
+}
+
+define i64 @test_blsmsk_i64(i64 %a0, i64 *%a1) {
+; GENERIC-LABEL: test_blsmsk_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: blsmskq (%rsi), %rcx
+; GENERIC-NEXT: blsmskq %rdi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_blsmsk_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: blsmskq (%rsi), %rcx # sched: [4:0.50]
+; HASWELL-NEXT: blsmskq %rdi, %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_blsmsk_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: blsmskq (%rsi), %rcx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: blsmskq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blsmsk_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: blsmskq (%rsi), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: blsmskq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a1
+ %2 = sub i64 %1, 1
+ %3 = sub i64 %a0, 1
+ %4 = xor i64 %1, %2
+ %5 = xor i64 %a0, %3
+ %6 = add i64 %4, %5
+ ret i64 %6
+}
+
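+; BLSR clears the lowest set bit: x & (x - 1).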
+define i32 @test_blsr_i32(i32 %a0, i32 *%a1) {
+; GENERIC-LABEL: test_blsr_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: blsrl (%rsi), %ecx
+; GENERIC-NEXT: blsrl %edi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_blsr_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: blsrl (%rsi), %ecx # sched: [4:0.50]
+; HASWELL-NEXT: blsrl %edi, %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_blsr_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: blsrl (%rsi), %ecx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: blsrl %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blsr_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: blsrl (%rsi), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: blsrl %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a1
+ %2 = sub i32 %1, 1
+ %3 = sub i32 %a0, 1
+ %4 = and i32 %1, %2
+ %5 = and i32 %a0, %3
+ %6 = add i32 %4, %5
+ ret i32 %6
+}
+
+define i64 @test_blsr_i64(i64 %a0, i64 *%a1) {
+; GENERIC-LABEL: test_blsr_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: blsrq (%rsi), %rcx
+; GENERIC-NEXT: blsrq %rdi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_blsr_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: blsrq (%rsi), %rcx # sched: [4:0.50]
+; HASWELL-NEXT: blsrq %rdi, %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_blsr_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: blsrq (%rsi), %rcx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: blsrq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blsr_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: blsrq (%rsi), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: blsrq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a1
+ %2 = sub i64 %1, 1
+ %3 = sub i64 %a0, 1
+ %4 = and i64 %1, %2
+ %5 = and i64 %a0, %3
+ %6 = add i64 %4, %5
+ ret i64 %6
+}
+
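+; With +bmi available, cttz with is_zero_undef = false lowers directly to
+; tzcnt, which is defined for a zero input.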
+define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
+; GENERIC-LABEL: test_cttz_i16:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: tzcntw (%rsi), %cx
+; GENERIC-NEXT: tzcntw %di, %ax
+; GENERIC-NEXT: orl %ecx, %eax
+; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_cttz_i16:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: tzcntw (%rsi), %cx # sched: [7:1.00]
+; HASWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
+; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_cttz_i16:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: tzcntw (%rsi), %cx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: tzcntw %di, %ax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cttz_i16:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: tzcntw %di, %ax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i16, i16 *%a1
+ %2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
+ %3 = tail call i16 @llvm.cttz.i16( i16 %a0, i1 false )
+ %4 = or i16 %2, %3
+ ret i16 %4
+}
+declare i16 @llvm.cttz.i16(i16, i1)
+
+define i32 @test_cttz_i32(i32 %a0, i32 *%a1) {
+; GENERIC-LABEL: test_cttz_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: tzcntl (%rsi), %ecx
+; GENERIC-NEXT: tzcntl %edi, %eax
+; GENERIC-NEXT: orl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_cttz_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: tzcntl (%rsi), %ecx # sched: [7:1.00]
+; HASWELL-NEXT: tzcntl %edi, %eax # sched: [3:1.00]
+; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_cttz_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: tzcntl (%rsi), %ecx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: tzcntl %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cttz_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: tzcntl (%rsi), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: tzcntl %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a1
+ %2 = tail call i32 @llvm.cttz.i32( i32 %1, i1 false )
+ %3 = tail call i32 @llvm.cttz.i32( i32 %a0, i1 false )
+ %4 = or i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i64 @test_cttz_i64(i64 %a0, i64 *%a1) {
+; GENERIC-LABEL: test_cttz_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: tzcntq (%rsi), %rcx
+; GENERIC-NEXT: tzcntq %rdi, %rax
+; GENERIC-NEXT: orq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_cttz_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: tzcntq (%rsi), %rcx # sched: [7:1.00]
+; HASWELL-NEXT: tzcntq %rdi, %rax # sched: [3:1.00]
+; HASWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_cttz_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: tzcntq (%rsi), %rcx # sched: [?:0.000000e+00]
+; BTVER2-NEXT: tzcntq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT: orq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cttz_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: tzcntq (%rsi), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: tzcntq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a1
+ %2 = tail call i64 @llvm.cttz.i64( i64 %1, i1 false )
+ %3 = tail call i64 @llvm.cttz.i64( i64 %a0, i1 false )
+ %4 = or i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.cttz.i64(i64, i1)
diff --git a/test/CodeGen/X86/bmi2-schedule.ll b/test/CodeGen/X86/bmi2-schedule.ll
new file mode 100644
index 0000000000000..9666dd85d8535
--- /dev/null
+++ b/test/CodeGen/X86/bmi2-schedule.ll
@@ -0,0 +1,180 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+bmi2 | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
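+; BZHI zeroes all bits of the source from the index in the control operand
+; upward; the index rides in %a0 for both the register and memory forms.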
+define i32 @test_bzhi_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_bzhi_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: bzhil %edi, (%rdx), %ecx
+; GENERIC-NEXT: bzhil %edi, %esi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_bzhi_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: bzhil %edi, (%rdx), %ecx # sched: [4:0.50]
+; HASWELL-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_bzhi_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: bzhil %edi, %esi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a2
+ %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %1, i32 %a0)
+ %3 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a1, i32 %a0)
+ %4 = add i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
+
+define i64 @test_bzhi_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_bzhi_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: bzhiq %rdi, (%rdx), %rcx
+; GENERIC-NEXT: bzhiq %rdi, %rsi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_bzhi_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [4:0.50]
+; HASWELL-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_bzhi_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a2
+ %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %1, i64 %a0)
+ %3 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a1, i64 %a0)
+ %4 = add i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
+
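+; PDEP scatters the low bits of the first operand to the bit positions
+; selected by the mask in the second operand.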
+define i32 @test_pdep_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_pdep_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: pdepl (%rdx), %edi, %ecx
+; GENERIC-NEXT: pdepl %esi, %edi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_pdep_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: pdepl (%rdx), %edi, %ecx # sched: [7:1.00]
+; HASWELL-NEXT: pdepl %esi, %edi, %eax # sched: [3:1.00]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pdep_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: pdepl %esi, %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a2
+ %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %1)
+ %3 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
+ %4 = add i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
+
+define i64 @test_pdep_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_pdep_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: pdepq (%rdx), %rdi, %rcx
+; GENERIC-NEXT: pdepq %rsi, %rdi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_pdep_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [7:1.00]
+; HASWELL-NEXT: pdepq %rsi, %rdi, %rax # sched: [3:1.00]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pdep_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a2
+ %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %1)
+ %3 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
+ %4 = add i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
+
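+; PEXT is the inverse gather: the mask-selected bits of the first operand
+; are packed into the low bits of the result.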
+define i32 @test_pext_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_pext_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: pextl (%rdx), %edi, %ecx
+; GENERIC-NEXT: pextl %esi, %edi, %eax
+; GENERIC-NEXT: addl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_pext_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: pextl (%rdx), %edi, %ecx # sched: [7:1.00]
+; HASWELL-NEXT: pextl %esi, %edi, %eax # sched: [3:1.00]
+; HASWELL-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pext_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: pextl %esi, %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a2
+ %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %1)
+ %3 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
+ %4 = add i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.x86.bmi.pext.32(i32, i32)
+
+define i64 @test_pext_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_pext_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: pextq (%rdx), %rdi, %rcx
+; GENERIC-NEXT: pextq %rsi, %rdi, %rax
+; GENERIC-NEXT: addq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_pext_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: pextq (%rdx), %rdi, %rcx # sched: [7:1.00]
+; HASWELL-NEXT: pextq %rsi, %rdi, %rax # sched: [3:1.00]
+; HASWELL-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pext_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: pextq %rsi, %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a2
+ %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %1)
+ %3 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
+ %4 = add i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.x86.bmi.pext.64(i64, i64)
diff --git a/test/CodeGen/X86/bool-ext-inc.ll b/test/CodeGen/X86/bool-ext-inc.ll
index e292ccd0be11d..7c1042878d591 100644
--- a/test/CodeGen/X86/bool-ext-inc.ll
+++ b/test/CodeGen/X86/bool-ext-inc.ll
@@ -19,7 +19,7 @@ define i32 @sext_inc(i1 zeroext %x) nounwind {
define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
; CHECK-LABEL: sext_inc_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%ext = sext <4 x i1> %x to <4 x i32>
@@ -31,7 +31,7 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK: # BB#0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, %y
@@ -56,7 +56,7 @@ define <4 x i64> @cmpgt_sext_inc_vec256(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec256:
; CHECK: # BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i64> %x, %y
@@ -91,7 +91,7 @@ define <4 x i32> @bool_logic_and_math_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32>
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpxor %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp1 = icmp ne <4 x i32> %a, %b
diff --git a/test/CodeGen/X86/bswap-rotate.ll b/test/CodeGen/X86/bswap-rotate.ll
new file mode 100644
index 0000000000000..f686febe5645d
--- /dev/null
+++ b/test/CodeGen/X86/bswap-rotate.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=i686 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+
+; Combine BSWAP (lowered to rolw $8) with a second rotate.
+; This test checks for combining rotates with inconsistent constant value types.
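+; bswap on i16 swaps the two bytes, i.e. a rotate by 8; composing it with the
+; shl-by-1/lshr-by-15 rotate below yields a rotate by 9, hence the single rolw $9.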
+
+define i16 @combine_bswap_rotate(i16 %a0) {
+; X86-LABEL: combine_bswap_rotate:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rolw $9, %ax
+; X86-NEXT: retl
+;
+; X64-LABEL: combine_bswap_rotate:
+; X64: # BB#0:
+; X64-NEXT: rolw $9, %di
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %1 = call i16 @llvm.bswap.i16(i16 %a0)
+ %2 = shl i16 %1, 1
+ %3 = lshr i16 %1, 15
+ %4 = or i16 %2, %3
+ ret i16 %4
+}
+
+declare i16 @llvm.bswap.i16(i16)
diff --git a/test/CodeGen/X86/clobber-fi0.ll b/test/CodeGen/X86/clobber-fi0.ll
index 02f1a1616db2d..b69b18531601a 100644
--- a/test/CodeGen/X86/clobber-fi0.ll
+++ b/test/CodeGen/X86/clobber-fi0.ll
@@ -15,22 +15,22 @@ bb:
%tmp = alloca i32, align 4 ; [#uses=3 type=i32*]
%tmp2 = alloca i32, align 4 ; [#uses=3 type=i32*]
%tmp3 = alloca i32 ; [#uses=1 type=i32*]
- store i32 1, i32* %tmp, align 4
- store i32 1, i32* %tmp2, align 4
+ store volatile i32 1, i32* %tmp, align 4
+ store volatile i32 1, i32* %tmp2, align 4
br label %bb4
bb4: ; preds = %bb4, %bb
- %tmp6 = load i32, i32* %tmp2, align 4 ; [#uses=1 type=i32]
+ %tmp6 = load volatile i32, i32* %tmp2, align 4 ; [#uses=1 type=i32]
%tmp7 = add i32 %tmp6, -1 ; [#uses=2 type=i32]
- store i32 %tmp7, i32* %tmp2, align 4
+ store volatile i32 %tmp7, i32* %tmp2, align 4
%tmp8 = icmp eq i32 %tmp7, 0 ; [#uses=1 type=i1]
- %tmp9 = load i32, i32* %tmp ; [#uses=1 type=i32]
+ %tmp9 = load volatile i32, i32* %tmp ; [#uses=1 type=i32]
%tmp10 = add i32 %tmp9, -1 ; [#uses=1 type=i32]
- store i32 %tmp10, i32* %tmp3
+ store volatile i32 %tmp10, i32* %tmp3
br i1 %tmp8, label %bb11, label %bb4
bb11: ; preds = %bb4
- %tmp12 = load i32, i32* %tmp, align 4 ; [#uses=1 type=i32]
+ %tmp12 = load volatile i32, i32* %tmp, align 4 ; [#uses=1 type=i32]
ret i32 %tmp12
}
diff --git a/test/CodeGen/X86/combine-rotates.ll b/test/CodeGen/X86/combine-rotates.ll
index 713ee5d0f65a9..0d74c937af33e 100644
--- a/test/CodeGen/X86/combine-rotates.ll
+++ b/test/CodeGen/X86/combine-rotates.ll
@@ -6,22 +6,12 @@
define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot:
; XOP: # BB#0:
-; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
-; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
-; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: combine_vec_rot_rot:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
-; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
-; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vprolvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
%2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
@@ -40,12 +30,7 @@ define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
;
; AVX512-LABEL: combine_vec_rot_rot_splat:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrld $3, %xmm0, %xmm1
-; AVX512-NEXT: vpslld $29, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vpsrld $22, %xmm0, %xmm1
-; AVX512-NEXT: vpslld $10, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vprold $7, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
%2 = shl <4 x i32> %x, <i32 29, i32 29, i32 29, i32 29>
@@ -63,12 +48,6 @@ define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
;
; AVX512-LABEL: combine_vec_rot_rot_splat_zero:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrld $1, %xmm0, %xmm1
-; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vpsrld $31, %xmm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
%2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index 3dbff2680c22f..a6491a0a86940 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -392,7 +392,7 @@ define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_shl_gt_lshr0:
; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -437,7 +437,7 @@ define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_shl_le_lshr0:
; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -481,7 +481,7 @@ define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_shl_ashr0:
; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
@@ -515,7 +515,7 @@ define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
; AVX-LABEL: combine_vec_shl_add0:
; AVX: # BB#0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = add <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
@@ -550,7 +550,7 @@ define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_shl_or0:
; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [5,5,5,5]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -585,7 +585,7 @@ define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_shl_mul0:
; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
diff --git a/test/CodeGen/X86/combine-srl.ll b/test/CodeGen/X86/combine-srl.ll
index 21564cdd73530..473fae19f4fd6 100644
--- a/test/CodeGen/X86/combine-srl.ll
+++ b/test/CodeGen/X86/combine-srl.ll
@@ -91,7 +91,7 @@ define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -326,7 +326,7 @@ define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
@@ -376,10 +376,10 @@ define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
diff --git a/test/CodeGen/X86/combine-udiv.ll b/test/CodeGen/X86/combine-udiv.ll
index e1e849929405a..b6ae2fa6d1578 100644
--- a/test/CodeGen/X86/combine-udiv.ll
+++ b/test/CodeGen/X86/combine-udiv.ll
@@ -166,7 +166,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
;
; AVX2-LABEL: combine_vec_udiv_by_shl_pow2a:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/combine-urem.ll b/test/CodeGen/X86/combine-urem.ll
index 91da268a8d75a..4c7716bbaebed 100644
--- a/test/CodeGen/X86/combine-urem.ll
+++ b/test/CodeGen/X86/combine-urem.ll
@@ -43,7 +43,7 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
;
; AVX2-LABEL: combine_vec_urem_by_pow2a:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = urem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
@@ -87,7 +87,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
;
; AVX2-LABEL: combine_vec_urem_by_pow2c:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -146,7 +146,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
;
; AVX2-LABEL: combine_vec_urem_by_pow2d:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -183,7 +183,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/f16c-schedule.ll b/test/CodeGen/X86/f16c-schedule.ll
new file mode 100644
index 0000000000000..15ae4a49d7d32
--- /dev/null
+++ b/test/CodeGen/X86/f16c-schedule.ll
@@ -0,0 +1,144 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=IVY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
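+; VCVTPH2PS widens packed half-precision values to single precision; each
+; test converts one register and one loaded vector to cover both forms.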
+define <4 x float> @test_vcvtph2ps_128(<8 x i16> %a0, <8 x i16> *%a1) {
+; IVY-LABEL: test_vcvtph2ps_128:
+; IVY: # BB#0:
+; IVY-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [7:1.00]
+; IVY-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
+; IVY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; IVY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_vcvtph2ps_128:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [7:1.00]
+; HASWELL-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [4:1.00]
+; HASWELL-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_vcvtph2ps_128:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [8:1.00]
+; BTVER2-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_vcvtph2ps_128:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load <8 x i16>, <8 x i16> *%a1
+ %2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %1)
+ %3 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
+ %4 = fadd <4 x float> %2, %3
+ ret <4 x float> %4
+}
+declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>)
+
+define <8 x float> @test_vcvtph2ps_256(<8 x i16> %a0, <8 x i16> *%a1) {
+; IVY-LABEL: test_vcvtph2ps_256:
+; IVY: # BB#0:
+; IVY-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [7:1.00]
+; IVY-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [3:1.00]
+; IVY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; IVY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_vcvtph2ps_256:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [7:1.00]
+; HASWELL-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [4:1.00]
+; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_vcvtph2ps_256:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [8:1.00]
+; BTVER2-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_vcvtph2ps_256:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load <8 x i16>, <8 x i16> *%a1
+ %2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %1)
+ %3 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
+ %4 = fadd <8 x float> %2, %3
+ ret <8 x float> %4
+}
+declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>)
+
+define <8 x i16> @test_vcvtps2ph_128(<4 x float> %a0, <4 x float> %a1, <4 x i16> *%a2) {
+; IVY-LABEL: test_vcvtps2ph_128:
+; IVY: # BB#0:
+; IVY-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
+; IVY-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [7:1.00]
+; IVY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_vcvtps2ph_128:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [4:1.00]
+; HASWELL-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [8:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_vcvtps2ph_128:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [8:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_vcvtps2ph_128:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
+ %2 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a1, i32 0)
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i16> %3, <4 x i16> *%a2
+ ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32)
+
+define <8 x i16> @test_vcvtps2ph_256(<8 x float> %a0, <8 x float> %a1, <8 x i16> *%a2) {
+; IVY-LABEL: test_vcvtps2ph_256:
+; IVY: # BB#0:
+; IVY-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
+; IVY-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [7:1.00]
+; IVY-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; IVY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_vcvtps2ph_256:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [4:1.00]
+; HASWELL-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [8:1.00]
+; HASWELL-NEXT: vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_vcvtps2ph_256:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [8:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_vcvtps2ph_256:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [12:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
+ %2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0)
+ store <8 x i16> %2, <8 x i16> *%a2
+ ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32)
diff --git a/test/CodeGen/X86/fast-isel-x86-64.ll b/test/CodeGen/X86/fast-isel-x86-64.ll
index 3d5c12c03484f..c87353ed1f5ad 100644
--- a/test/CodeGen/X86/fast-isel-x86-64.ll
+++ b/test/CodeGen/X86/fast-isel-x86-64.ll
@@ -316,7 +316,7 @@ define void @allocamaterialize() {
; STDERR-NOT: FastISel missed terminator: ret void
; CHECK-LABEL: win64ccfun
-define x86_64_win64cc void @win64ccfun(i32 %i) {
+define win64cc void @win64ccfun(i32 %i) {
; CHECK: ret
ret void
}
diff --git a/test/CodeGen/X86/hipe-cc.ll b/test/CodeGen/X86/hipe-cc.ll
index fbc4cd9d4f9c0..86469dad23f22 100644
--- a/test/CodeGen/X86/hipe-cc.ll
+++ b/test/CodeGen/X86/hipe-cc.ll
@@ -48,11 +48,7 @@ entry:
store i32 %arg0, i32* %arg0_var
store i32 %arg1, i32* %arg1_var
store i32 %arg2, i32* %arg2_var
-
- ; CHECK: movl 16(%esp), %esi
- ; CHECK-NEXT: movl 12(%esp), %ebp
- ; CHECK-NEXT: movl 8(%esp), %eax
- ; CHECK-NEXT: movl 4(%esp), %edx
+  ; These loads read the values from their immediately preceding stores and are optimized away.
%0 = load i32, i32* %hp_var
%1 = load i32, i32* %p_var
%2 = load i32, i32* %arg0_var
diff --git a/test/CodeGen/X86/hipe-cc64.ll b/test/CodeGen/X86/hipe-cc64.ll
index 43e2e1409fdee..efe07cf6301e9 100644
--- a/test/CodeGen/X86/hipe-cc64.ll
+++ b/test/CodeGen/X86/hipe-cc64.ll
@@ -57,11 +57,7 @@ entry:
store i64 %arg2, i64* %arg2_var
store i64 %arg3, i64* %arg3_var
- ; CHECK: movq 40(%rsp), %r15
- ; CHECK-NEXT: movq 32(%rsp), %rbp
- ; CHECK-NEXT: movq 24(%rsp), %rsi
- ; CHECK-NEXT: movq 16(%rsp), %rdx
- ; CHECK-NEXT: movq 8(%rsp), %rcx
+  ; These loads read the values just written from the corresponding registers and are therefore no-ops.
%0 = load i64, i64* %hp_var
%1 = load i64, i64* %p_var
%2 = load i64, i64* %arg0_var
diff --git a/test/CodeGen/X86/lea32-schedule.ll b/test/CodeGen/X86/lea32-schedule.ll
new file mode 100644
index 0000000000000..e42ce30c5a6d2
--- /dev/null
+++ b/test/CodeGen/X86/lea32-schedule.ll
@@ -0,0 +1,653 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=slm | FileCheck %s --check-prefix=CHECK --check-prefix=SLM
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
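+; These tests check that small add/mul chains fold into a single 32-bit lea
+; (note the implicit 32->64 subregister "kill" copies on the inputs) and
+; record the per-CPU latency/throughput reported by -print-schedule.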
+define i32 @test_lea_offset(i32) {
+; GENERIC-LABEL: test_lea_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal -24(%rdi), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_offset:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = add nsw i32 %0, -24
+ ret i32 %2
+}
+
+define i32 @test_lea_offset_big(i32) {
+; GENERIC-LABEL: test_lea_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal 1024(%rdi), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = add nsw i32 %0, 1024
+ ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @test_lea_add(i32, i32) {
+; GENERIC-LABEL: test_lea_add:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal (%rdi,%rsi), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = add nsw i32 %1, %0
+ ret i32 %3
+}
+
+define i32 @test_lea_add_offset(i32, i32) {
+; GENERIC-LABEL: test_lea_add_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal 16(%rdi,%rsi), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_offset:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; SANDY-NEXT: addl $16, %eax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl $16, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = add i32 %0, 16
+ %4 = add i32 %3, %1
+ ret i32 %4
+}
+
+define i32 @test_lea_add_offset_big(i32, i32) {
+; GENERIC-LABEL: test_lea_add_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal -4096(%rdi,%rsi), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; SANDY-NEXT: addl $-4096, %eax # imm = 0xF000
+; SANDY-NEXT: # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl $-4096, %eax # imm = 0xF000
+; HASWELL-NEXT: # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = add i32 %0, -4096
+ %4 = add i32 %3, %1
+ ret i32 %4
+}
+
+define i32 @test_lea_mul(i32) {
+; GENERIC-LABEL: test_lea_mul:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_mul:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal (%rdi,%rdi,2), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_mul:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_mul:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_mul:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_mul:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_mul:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = mul nsw i32 %0, 3
+ ret i32 %2
+}
+
+define i32 @test_lea_mul_offset(i32) {
+; GENERIC-LABEL: test_lea_mul_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_mul_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal -32(%rdi,%rdi,2), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_mul_offset:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_mul_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
+; SANDY-NEXT: addl $-32, %eax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_mul_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_mul_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_mul_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = mul nsw i32 %0, 3
+ %3 = add nsw i32 %2, -32
+ ret i32 %3
+}
+
+define i32 @test_lea_mul_offset_big(i32) {
+; GENERIC-LABEL: test_lea_mul_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_mul_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal 10000(%rdi,%rdi,8), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_mul_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_mul_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
+; SANDY-NEXT: addl $10000, %eax # imm = 0x2710
+; SANDY-NEXT: # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_mul_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl $10000, %eax # imm = 0x2710
+; HASWELL-NEXT: # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_mul_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_mul_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = mul nsw i32 %0, 9
+ %3 = add nsw i32 %2, 10000
+ ret i32 %3
+}
+
+define i32 @test_lea_add_scale(i32, i32) {
+; GENERIC-LABEL: test_lea_add_scale:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_scale:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal (%rdi,%rsi,2), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_scale:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_scale:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_scale:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_scale:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_scale:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = shl i32 %1, 1
+ %4 = add nsw i32 %3, %0
+ ret i32 %4
+}
+
+define i32 @test_lea_add_scale_offset(i32, i32) {
+; GENERIC-LABEL: test_lea_add_scale_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_scale_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal 96(%rdi,%rsi,4), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_scale_offset:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_scale_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
+; SANDY-NEXT: addl $96, %eax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_scale_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl $96, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_scale_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_scale_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = shl i32 %1, 2
+ %4 = add i32 %0, 96
+ %5 = add i32 %4, %3
+ ret i32 %5
+}
+
+define i32 @test_lea_add_scale_offset_big(i32, i32) {
+; GENERIC-LABEL: test_lea_add_scale_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_scale_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: leal -1200(%rdi,%rsi,8), %eax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_scale_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_scale_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
+; SANDY-NEXT: addl $-1200, %eax # imm = 0xFB50
+; SANDY-NEXT: # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_scale_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
+; HASWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
+; HASWELL-NEXT: # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_scale_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_scale_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = shl i32 %1, 3
+ %4 = add i32 %0, -1200
+ %5 = add i32 %4, %3
+ ret i32 %5
+}
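The lea-schedule tests above pin down two behaviors at once: that shl/mul/add combinations fold into a single leal, and that -print-schedule annotates each instruction with a per-CPU "sched: [latency:reciprocal throughput]" pair. The "# kill:" comments reflect 32-bit arguments being read through their 64-bit super-registers, as LEA addressing requires, and the SANDY and HASWELL runs show a three-component LEA (base + index + displacement) being split into a leal/addl pair on those subtargets. A minimal sketch of the folding pattern being exercised (illustrative only, not part of the patch; the function name is hypothetical):

define i32 @sketch_lea_fold(i32, i32) {
  %3 = shl i32 %1, 2   ; index scaled by 4 supplies the LEA scale field
  %4 = add i32 %3, %0  ; plus a base register
  %5 = add i32 %4, 8   ; plus a constant displacement
  ret i32 %5           ; expected to fold to: leal 8(%rdi,%rsi,4), %eax
}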
diff --git a/test/CodeGen/X86/lea64-schedule.ll b/test/CodeGen/X86/lea64-schedule.ll
new file mode 100644
index 0000000000000..0ff1574c809df
--- /dev/null
+++ b/test/CodeGen/X86/lea64-schedule.ll
@@ -0,0 +1,534 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=slm | FileCheck %s --check-prefix=CHECK --check-prefix=SLM
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
+define i64 @test_lea_offset(i64) {
+; GENERIC-LABEL: test_lea_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq -24(%rdi), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_offset:
+; SLM: # BB#0:
+; SLM-NEXT: leaq -24(%rdi), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq -24(%rdi), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq -24(%rdi), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = add nsw i64 %0, -24
+ ret i64 %2
+}
+
+define i64 @test_lea_offset_big(i64) {
+; GENERIC-LABEL: test_lea_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq 1024(%rdi), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: leaq 1024(%rdi), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = add nsw i64 %0, 1024
+ ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i64 @test_lea_add(i64, i64) {
+; GENERIC-LABEL: test_lea_add:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq (%rdi,%rsi), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add:
+; SLM: # BB#0:
+; SLM-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = add nsw i64 %1, %0
+ ret i64 %3
+}
+
+define i64 @test_lea_add_offset(i64, i64) {
+; GENERIC-LABEL: test_lea_add_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq 16(%rdi,%rsi), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_offset:
+; SLM: # BB#0:
+; SLM-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; SANDY-NEXT: addq $16, %rax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq $16, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = add i64 %0, 16
+ %4 = add i64 %3, %1
+ ret i64 %4
+}
+
+define i64 @test_lea_add_offset_big(i64, i64) {
+; GENERIC-LABEL: test_lea_add_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq -4096(%rdi,%rsi), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; SANDY-NEXT: addq $-4096, %rax # imm = 0xF000
+; SANDY-NEXT: # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq $-4096, %rax # imm = 0xF000
+; HASWELL-NEXT: # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = add i64 %0, -4096
+ %4 = add i64 %3, %1
+ ret i64 %4
+}
+
+define i64 @test_lea_mul(i64) {
+; GENERIC-LABEL: test_lea_mul:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_mul:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq (%rdi,%rdi,2), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_mul:
+; SLM: # BB#0:
+; SLM-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_mul:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_mul:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_mul:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_mul:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = mul nsw i64 %0, 3
+ ret i64 %2
+}
+
+define i64 @test_lea_mul_offset(i64) {
+; GENERIC-LABEL: test_lea_mul_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_mul_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq -32(%rdi,%rdi,2), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_mul_offset:
+; SLM: # BB#0:
+; SLM-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_mul_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
+; SANDY-NEXT: addq $-32, %rax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_mul_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq $-32, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_mul_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_mul_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = mul nsw i64 %0, 3
+ %3 = add nsw i64 %2, -32
+ ret i64 %3
+}
+
+define i64 @test_lea_mul_offset_big(i64) {
+; GENERIC-LABEL: test_lea_mul_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_mul_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq 10000(%rdi,%rdi,8), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_mul_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_mul_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
+; SANDY-NEXT: addq $10000, %rax # imm = 0x2710
+; SANDY-NEXT: # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_mul_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq $10000, %rax # imm = 0x2710
+; HASWELL-NEXT: # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_mul_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_mul_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %2 = mul nsw i64 %0, 9
+ %3 = add nsw i64 %2, 10000
+ ret i64 %3
+}
+
+define i64 @test_lea_add_scale(i64, i64) {
+; GENERIC-LABEL: test_lea_add_scale:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_scale:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq (%rdi,%rsi,2), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_scale:
+; SLM: # BB#0:
+; SLM-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_scale:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_scale:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_scale:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_scale:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = shl i64 %1, 1
+ %4 = add nsw i64 %3, %0
+ ret i64 %4
+}
+
+define i64 @test_lea_add_scale_offset(i64, i64) {
+; GENERIC-LABEL: test_lea_add_scale_offset:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_scale_offset:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq 96(%rdi,%rsi,4), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_scale_offset:
+; SLM: # BB#0:
+; SLM-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_scale_offset:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
+; SANDY-NEXT: addq $96, %rax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_scale_offset:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rsi,4), %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq $96, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_scale_offset:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_scale_offset:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = shl i64 %1, 2
+ %4 = add i64 %0, 96
+ %5 = add i64 %4, %3
+ ret i64 %5
+}
+
+define i64 @test_lea_add_scale_offset_big(i64, i64) {
+; GENERIC-LABEL: test_lea_add_scale_offset_big:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.50]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; ATOM-LABEL: test_lea_add_scale_offset_big:
+; ATOM: # BB#0:
+; ATOM-NEXT: leaq -1200(%rdi,%rsi,8), %rax
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: nop
+; ATOM-NEXT: retq
+;
+; SLM-LABEL: test_lea_add_scale_offset_big:
+; SLM: # BB#0:
+; SLM-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:1.00]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_lea_add_scale_offset_big:
+; SANDY: # BB#0:
+; SANDY-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
+; SANDY-NEXT: addq $-1200, %rax # imm = 0xFB50
+; SANDY-NEXT: # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_lea_add_scale_offset_big:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: leaq (%rdi,%rsi,8), %rax # sched: [1:0.50]
+; HASWELL-NEXT: addq $-1200, %rax # imm = 0xFB50
+; HASWELL-NEXT: # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_lea_add_scale_offset_big:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lea_add_scale_offset_big:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %3 = shl i64 %1, 3
+ %4 = add i64 %0, -1200
+ %5 = add i64 %4, %3
+ ret i64 %5
+}
diff --git a/test/CodeGen/X86/legalize-shift-64.ll b/test/CodeGen/X86/legalize-shift-64.ll
index b3f2116e6486d..3ad6cad32d834 100644
--- a/test/CodeGen/X86/legalize-shift-64.ll
+++ b/test/CodeGen/X86/legalize-shift-64.ll
@@ -148,8 +148,7 @@ define i32 @test6() {
; CHECK-NEXT: andl $-8, %esp
; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: movl $1, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl $1, (%esp)
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: shldl $32, %eax, %ecx
@@ -175,9 +174,8 @@ define i32 @test6() {
; CHECK-NEXT: retl
%x = alloca i32, align 4
%t = alloca i64, align 8
- store i32 1, i32* %x, align 4
- store i64 1, i64* %t, align 8 ;; DEAD
- %load = load i32, i32* %x, align 4
+ store volatile i32 1, i32* %x, align 4
+ %load = load volatile i32, i32* %x, align 4
%shl = shl i32 %load, 8
%add = add i32 %shl, -224
%sh_prom = zext i32 %add to i64
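The legalize-shift-64.ll change above swaps plain memory operations for volatile ones: the old i64 store was dead and the i32 store/load pair could be forwarded away, so the optimizer was free to delete the very instructions the CHECK lines match. Volatile pins both in place without otherwise changing the test. A minimal sketch of the idiom (illustrative, hypothetical function name):

define i32 @sketch_volatile_roundtrip() {
  %x = alloca i32, align 4
  store volatile i32 1, i32* %x, align 4   ; cannot be deleted as a dead store
  %v = load volatile i32, i32* %x, align 4 ; cannot be replaced by store-forwarding
  ret i32 %v
}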
diff --git a/test/CodeGen/X86/lzcnt-schedule.ll b/test/CodeGen/X86/lzcnt-schedule.ll
new file mode 100644
index 0000000000000..cd0dcbbd6afbd
--- /dev/null
+++ b/test/CodeGen/X86/lzcnt-schedule.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+lzcnt | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
+define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
+; GENERIC-LABEL: test_ctlz_i16:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: lzcntw (%rsi), %cx
+; GENERIC-NEXT: lzcntw %di, %ax
+; GENERIC-NEXT: orl %ecx, %eax
+; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_ctlz_i16:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: lzcntw (%rsi), %cx
+; HASWELL-NEXT: lzcntw %di, %ax
+; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_ctlz_i16:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: lzcntw (%rsi), %cx
+; BTVER2-NEXT: lzcntw %di, %ax
+; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ctlz_i16:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: lzcntw (%rsi), %cx
+; ZNVER1-NEXT: lzcntw %di, %ax
+; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i16, i16 *%a1
+ %2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
+ %3 = tail call i16 @llvm.ctlz.i16( i16 %a0, i1 false )
+ %4 = or i16 %2, %3
+ ret i16 %4
+}
+declare i16 @llvm.ctlz.i16(i16, i1)
+
+define i32 @test_ctlz_i32(i32 %a0, i32 *%a1) {
+; GENERIC-LABEL: test_ctlz_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: lzcntl (%rsi), %ecx
+; GENERIC-NEXT: lzcntl %edi, %eax
+; GENERIC-NEXT: orl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_ctlz_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: lzcntl (%rsi), %ecx
+; HASWELL-NEXT: lzcntl %edi, %eax
+; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_ctlz_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: lzcntl (%rsi), %ecx
+; BTVER2-NEXT: lzcntl %edi, %eax
+; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ctlz_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: lzcntl (%rsi), %ecx
+; ZNVER1-NEXT: lzcntl %edi, %eax
+; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a1
+ %2 = tail call i32 @llvm.ctlz.i32( i32 %1, i1 false )
+ %3 = tail call i32 @llvm.ctlz.i32( i32 %a0, i1 false )
+ %4 = or i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i64 @test_ctlz_i64(i64 %a0, i64 *%a1) {
+; GENERIC-LABEL: test_ctlz_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: lzcntq (%rsi), %rcx
+; GENERIC-NEXT: lzcntq %rdi, %rax
+; GENERIC-NEXT: orq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; HASWELL-LABEL: test_ctlz_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: lzcntq (%rsi), %rcx
+; HASWELL-NEXT: lzcntq %rdi, %rax
+; HASWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_ctlz_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: lzcntq (%rsi), %rcx
+; BTVER2-NEXT: lzcntq %rdi, %rax
+; BTVER2-NEXT: orq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ctlz_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: lzcntq (%rsi), %rcx
+; ZNVER1-NEXT: lzcntq %rdi, %rax
+; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a1
+ %2 = tail call i64 @llvm.ctlz.i64( i64 %1, i1 false )
+ %3 = tail call i64 @llvm.ctlz.i64( i64 %a0, i1 false )
+ %4 = or i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.ctlz.i64(i64, i1)
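The lzcnt-schedule tests follow the same shape as the lea files: each function runs llvm.ctlz over both a loaded and a register operand, and the or keeps both results live. The i1 false argument declares that a zero input is defined (yielding the bit width), which matches LZCNT semantics and lets the intrinsic lower directly to the instruction under -mattr=+lzcnt. A minimal sketch of what the flag means (illustrative, hypothetical function name):

declare i32 @llvm.ctlz.i32(i32, i1)

define i32 @sketch_ctlz_defined_zero(i32 %x) {
  ; i1 false: ctlz(0) is defined and yields 32, so a bare lzcntl suffices.
  %v = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  ret i32 %v
}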
diff --git a/test/CodeGen/X86/machine-outliner-debuginfo.ll b/test/CodeGen/X86/machine-outliner-debuginfo.ll
index 26a194764086d..02d0964e37eb5 100644
--- a/test/CodeGen/X86/machine-outliner-debuginfo.ll
+++ b/test/CodeGen/X86/machine-outliner-debuginfo.ll
@@ -17,6 +17,7 @@ define i32 @main() #0 !dbg !11 {
call void @llvm.dbg.value(metadata i32 10, i64 0, metadata !15, metadata !16), !dbg !17
store i32 4, i32* %5, align 4
store i32 0, i32* @x, align 4, !dbg !24
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; This is the same sequence of instructions without a debug value. It should be outlined
; in the same way.
; CHECK: callq l_OUTLINED_FUNCTION_0
diff --git a/test/CodeGen/X86/machine-outliner.ll b/test/CodeGen/X86/machine-outliner.ll
index 9f8e6ec298f4e..b4a277ec2d825 100644
--- a/test/CodeGen/X86/machine-outliner.ll
+++ b/test/CodeGen/X86/machine-outliner.ll
@@ -85,6 +85,7 @@ define i32 @main() #0 {
store i32 3, i32* %4, align 4
store i32 4, i32* %5, align 4
store i32 1, i32* @x, align 4
+ call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
; CHECK: callq [[OFUNC2]]
store i32 1, i32* %2, align 4
store i32 2, i32* %3, align 4
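Both machine-outliner changes above insert the same empty inline asm with a memory clobber between otherwise identical store sequences. A reasonable reading of the change: the asm emits no instructions, but its sideeffect marker and ~{memory} constraint stop earlier passes from merging or reordering stores across it, so the outliner still sees two matching candidate sequences. A minimal sketch of the barrier idiom (illustrative, hypothetical function name):

define void @sketch_asm_barrier(i32* %p) {
  store i32 1, i32* %p, align 4
  ; Zero-length asm: emits nothing, but the memory clobber keeps the
  ; surrounding stores from being combined or eliminated across it.
  call void asm sideeffect "", "~{memory},~{dirflag},~{fpsr},~{flags}"()
  store i32 1, i32* %p, align 4
  ret void
}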
diff --git a/test/CodeGen/X86/memcmp-minsize.ll b/test/CodeGen/X86/memcmp-minsize.ll
new file mode 100644
index 0000000000000..a7f42644ca2d5
--- /dev/null
+++ b/test/CodeGen/X86/memcmp-minsize.ll
@@ -0,0 +1,721 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+
+; This tests codegen-time inlining/optimization of memcmp
+; rdar://6480398
+
+@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
+
+declare i32 @memcmp(i8*, i8*, i64)
+
+define i32 @length2(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length2:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $2, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length2:
+; X64: # BB#0:
+; X64-NEXT: pushq $2
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ ret i32 %m
+}
+
+define i1 @length2_eq(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length2_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_const(i8* %X) nounwind minsize {
+; X86-LABEL: length2_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpw $12849, (%eax) # imm = 0x3231
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_const:
+; X64: # BB#0:
+; X64-NEXT: cmpw $12849, (%rdi) # imm = 0x3231
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length2_eq_nobuiltin_attr:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $2, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_nobuiltin_attr:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: pushq $2
+; X64-NEXT: popq %rdx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length3(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length3:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $3, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length3:
+; X64: # BB#0:
+; X64-NEXT: pushq $3
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ ret i32 %m
+}
+
+define i1 @length3_eq(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length3_eq:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $3, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length3_eq:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: pushq $3
+; X64-NEXT: popq %rdx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length4(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length4:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $4, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length4:
+; X64: # BB#0:
+; X64-NEXT: pushq $4
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ ret i32 %m
+}
+
+define i1 @length4_eq(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length4_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: cmpl (%eax), %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq:
+; X64: # BB#0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length4_eq_const(i8* %X) nounwind minsize {
+; X86-LABEL: length4_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq_const:
+; X64: # BB#0:
+; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length5(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length5:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $5, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length5:
+; X64: # BB#0:
+; X64-NEXT: pushq $5
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ ret i32 %m
+}
+
+define i1 @length5_eq(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length5_eq:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $5, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length5_eq:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: pushq $5
+; X64-NEXT: popq %rdx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length8(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length8:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $8, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length8:
+; X64: # BB#0:
+; X64-NEXT: pushq $8
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ ret i32 %m
+}
+
+define i1 @length8_eq(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length8_eq:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $8, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length8_eq_const(i8* %X) nounwind minsize {
+; X86-LABEL: length8_eq_const:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $8, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $.L.str, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq_const:
+; X64: # BB#0:
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length12_eq(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length12_eq:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $12, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length12_eq:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: pushq $12
+; X64-NEXT: popq %rdx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length12(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length12:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $12, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length12:
+; X64: # BB#0:
+; X64-NEXT: pushq $12
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ ret i32 %m
+}
+
+; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
+
+define i32 @length16(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length16:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $16, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length16:
+; X64: # BB#0:
+; X64-NEXT: pushq $16
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
+ ret i32 %m
+}
+
+define i1 @length16_eq(i8* %x, i8* %y) nounwind minsize {
+; X86-NOSSE-LABEL: length16_eq:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl $16, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu (%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length16_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length16_eq_const(i8* %X) nounwind minsize {
+; X86-NOSSE-LABEL: length16_eq_const:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl $16, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl $.L.str, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq_const:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length16_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length32(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length32:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $32, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length32:
+; X64: # BB#0:
+; X64-NEXT: pushq $32
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
+ ret i32 %m
+}
+
+; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
+
+define i1 @length32_eq(i8* %x, i8* %y) nounwind minsize {
+; X86-LABEL: length32_eq:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $32, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: pushq $32
+; X64-SSE2-NEXT: popq %rdx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_const(i8* %X) nounwind minsize {
+; X86-LABEL: length32_eq_const:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $32, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $.L.str, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: pushq $32
+; X64-SSE2-NEXT: popq %rdx
+; X64-SSE2-NEXT: movl $.L.str, %esi
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length64(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length64:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $64, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64:
+; X64: # BB#0:
+; X64-NEXT: pushq $64
+; X64-NEXT: popq %rdx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
+ ret i32 %m
+}
+
+define i1 @length64_eq(i8* %x, i8* %y) nounwind minsize {
+; X86-LABEL: length64_eq:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $64, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64_eq:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: pushq $64
+; X64-NEXT: popq %rdx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length64_eq_const(i8* %X) nounwind minsize {
+; X86-LABEL: length64_eq_const:
+; X86: # BB#0:
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $64, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $.L.str, {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64_eq_const:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: pushq $64
+; X64-NEXT: popq %rdx
+; X64-NEXT: movl $.L.str, %esi
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
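Throughout memcmp-minsize.ll, the X64 checks load the length with a pushq $n / popq %rdx pair instead of movl $n, %edx; for immediates that fit in a signed byte that is 3 bytes of encoding against 5, which is exactly the trade minsize asks for, and plain-result comparisons become bare tail calls to memcmp rather than inline expansions. A minimal sketch of the setup being tested (illustrative, hypothetical function name):

declare i32 @memcmp(i8*, i8*, i64)

define i32 @sketch_minsize_memcmp(i8* %a, i8* %b) minsize {
  ; Under minsize this is expected to lower to:
  ;   pushq $8 / popq %rdx / jmp memcmp
  %r = tail call i32 @memcmp(i8* %a, i8* %b, i64 8)
  ret i32 %r
}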
diff --git a/test/CodeGen/X86/memcmp-optsize.ll b/test/CodeGen/X86/memcmp-optsize.ll
new file mode 100644
index 0000000000000..450205a966d23
--- /dev/null
+++ b/test/CodeGen/X86/memcmp-optsize.ll
@@ -0,0 +1,871 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+
+; This tests codegen-time inlining/optimization of memcmp
+; rdar://6480398
+
+@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
+
+declare i32 @memcmp(i8*, i8*, i64)
+
+define i32 @length2(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length2:
+; X86: # BB#0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: xorl %edi, %edi
+; X86-NEXT: incl %edi
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: decl %eax
+; X86-NEXT: cmpw %dx, %cx
+; X86-NEXT: cmovael %edi, %eax
+; X86-NEXT: cmovel %esi, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
+;
+; X64-LABEL: length2:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpw %cx, %ax
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ ret i32 %m
+}
+
+define i1 @length2_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length2_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length2_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_const:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length2_eq_nobuiltin_attr:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $2
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_nobuiltin_attr:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $2, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length3:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl (%ecx), %esi
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: rolw $8, %si
+; X86-NEXT: movzwl %dx, %edx
+; X86-NEXT: movzwl %si, %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB4_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movzbl 2(%eax), %eax
+; X86-NEXT: movzbl 2(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: jmp .LBB4_3
+; X86-NEXT: .LBB4_1: # %res_block
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: incl %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: decl %eax
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: cmovael %ecx, %eax
+; X86-NEXT: .LBB4_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length3:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB4_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movzbl 2(%rdi), %eax
+; X64-NEXT: movzbl 2(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB4_1: # %res_block
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ ret i32 %m
+}
+
+define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length3_eq:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: cmpw (%ecx), %dx
+; X86-NEXT: jne .LBB5_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movb 2(%eax), %dl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpb 2(%ecx), %dl
+; X86-NEXT: je .LBB5_3
+; X86-NEXT: .LBB5_1: # %res_block
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: .LBB5_3: # %endblock
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length3_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: jne .LBB5_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movb 2(%rdi), %cl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpb 2(%rsi), %cl
+; X64-NEXT: je .LBB5_3
+; X64-NEXT: .LBB5_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB5_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length4(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length4:
+; X86: # BB#0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: xorl %edi, %edi
+; X86-NEXT: incl %edi
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: decl %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: cmovael %edi, %eax
+; X86-NEXT: cmovel %esi, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
+;
+; X64-LABEL: length4:
+; X64: # BB#0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ ret i32 %m
+}
+
+define i1 @length4_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length4_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: cmpl (%eax), %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq:
+; X64: # BB#0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length4_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length4_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq_const:
+; X64: # BB#0:
+; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length5:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl (%ecx), %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB9_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movzbl 4(%eax), %eax
+; X86-NEXT: movzbl 4(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: jmp .LBB9_3
+; X86-NEXT: .LBB9_1: # %res_block
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: incl %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: decl %eax
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: cmovael %ecx, %eax
+; X86-NEXT: .LBB9_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length5:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB9_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movzbl 4(%rdi), %eax
+; X64-NEXT: movzbl 4(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1: # %res_block
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ ret i32 %m
+}
+
+define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length5_eq:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: cmpl (%ecx), %edx
+; X86-NEXT: jne .LBB10_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movb 4(%eax), %dl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpb 4(%ecx), %dl
+; X86-NEXT: je .LBB10_3
+; X86-NEXT: .LBB10_1: # %res_block
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: .LBB10_3: # %endblock
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length5_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: jne .LBB10_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movb 4(%rdi), %cl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpb 4(%rsi), %cl
+; X64-NEXT: je .LBB10_3
+; X64-NEXT: .LBB10_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB10_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length8:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB11_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: je .LBB11_3
+; X86-NEXT: .LBB11_1: # %res_block
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: incl %esi
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: decl %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: cmovael %esi, %eax
+; X86-NEXT: .LBB11_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length8:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq (%rsi), %rcx
+; X64-NEXT: bswapq %rax
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ ret i32 %m
+}
+
+define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length8_eq:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: cmpl (%ecx), %edx
+; X86-NEXT: jne .LBB12_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl 4(%ecx), %edx
+; X86-NEXT: je .LBB12_3
+; X86-NEXT: .LBB12_1: # %res_block
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: .LBB12_3: # %endblock
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length8_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length8_eq_const:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $858927408, (%ecx) # imm = 0x33323130
+; X86-NEXT: jne .LBB13_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $926299444, 4(%ecx) # imm = 0x37363534
+; X86-NEXT: je .LBB13_3
+; X86-NEXT: .LBB13_1: # %res_block
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: .LBB13_3: # %endblock
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq_const:
+; X64: # BB#0:
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length12_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length12_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length12_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB14_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl 8(%rsi), %ecx
+; X64-NEXT: je .LBB14_3
+; X64-NEXT: .LBB14_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB14_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length12(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length12:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length12:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: movl 8(%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_1
+; X64-NEXT: # BB#3: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB15_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ ret i32 %m
+}
+
+; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
+
+define i32 @length16(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length16:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $16
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length16:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # BB#3: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB16_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
+ ret i32 %m
+}
+
+define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-NOSSE-LABEL: length16_eq:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu (%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: length16_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB17_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq 8(%rsi), %rcx
+; X64-NEXT: je .LBB17_3
+; X64-NEXT: .LBB17_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB17_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length16_eq_const(i8* %X) nounwind optsize {
+; X86-NOSSE-LABEL: length16_eq_const:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq_const:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: length16_eq_const:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: jne .LBB18_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: movabsq $3833745473465760056, %rcx # imm = 0x3534333231303938
+; X64-NEXT: cmpq %rcx, 8(%rdi)
+; X64-NEXT: je .LBB18_3
+; X64-NEXT: .LBB18_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB18_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length32(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length32:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length32:
+; X64: # BB#0:
+; X64-NEXT: movl $32, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
+ ret i32 %m
+}
+
+; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
+
+define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-LABEL: length32_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $32, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length32_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $.L.str, %esi
+; X64-SSE2-NEXT: movl $32, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length64(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length64:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64:
+; X64: # BB#0:
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
+ ret i32 %m
+}
+
+define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-LABEL: length64_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length64_eq:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length64_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length64_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length64_eq_const:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $.L.str, %esi
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
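Note on the pattern pinned down by the checks above: the codegen-time memcmp expansion splits the call into a chain of wide loads (byte-swapped so that an unsigned integer compare on this little-endian target reproduces memcmp's byte-wise, lexicographic order), a direct compare of any odd tail bytes, and a shared result block. The LLVM IR below is a hedged reconstruction of that shape for the length-3 case, inferred from the %loadbb/%res_block/%endblock labels in the assembly; it is a sketch, not the pass's literal output.

define i32 @memcmp3_expanded(i8* %X, i8* %Y) {
loadbb:
  ; load the leading 2 bytes of each buffer and byte-swap them so an
  ; unsigned integer compare matches memcmp's lexicographic byte order
  %px = bitcast i8* %X to i16*
  %py = bitcast i8* %Y to i16*
  %lx = load i16, i16* %px
  %ly = load i16, i16* %py
  %sx = call i16 @llvm.bswap.i16(i16 %lx)
  %sy = call i16 @llvm.bswap.i16(i16 %ly)
  %zx = zext i16 %sx to i32
  %zy = zext i16 %sy to i32
  %ne = icmp ne i32 %zx, %zy
  br i1 %ne, label %res_block, label %loadbb1

loadbb1:
  ; the trailing byte is compared by plain subtraction: the difference
  ; of the two zero-extended bytes is already a valid memcmp result
  %gx = getelementptr i8, i8* %X, i64 2
  %gy = getelementptr i8, i8* %Y, i64 2
  %bx = load i8, i8* %gx
  %by = load i8, i8* %gy
  %ex = zext i8 %bx to i32
  %ey = zext i8 %by to i32
  %sub = sub i32 %ex, %ey
  br label %endblock

res_block:
  ; the wide words differed: derive -1 or +1 from an unsigned compare
  %lt = icmp ult i32 %zx, %zy
  %r = select i1 %lt, i32 -1, i32 1
  br label %endblock

endblock:
  %res = phi i32 [ %sub, %loadbb1 ], [ %r, %res_block ]
  ret i32 %res
}

declare i16 @llvm.bswap.i16(i16)

The rolw $8 / cmpl / cmovbl sequences in the length3 checks above are this structure after instruction selection; under optsize the -1/+1 materialization is further shrunk to the xorl/incl/decl sequences visible in res_block.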
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 0e09abf73c8c9..2e67827654624 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
; This tests codegen-time inlining/optimization of memcmp
; rdar://6480398
@@ -12,43 +12,21 @@
declare i32 @memcmp(i8*, i8*, i64)
define i32 @length2(i8* %X, i8* %Y) nounwind {
-; X86-NOSSE-LABEL: length2:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT: movzwl (%ecx), %ecx
-; X86-NOSSE-NEXT: movzwl (%eax), %eax
-; X86-NOSSE-NEXT: rolw $8, %cx
-; X86-NOSSE-NEXT: rolw $8, %ax
-; X86-NOSSE-NEXT: cmpw %ax, %cx
-; X86-NOSSE-NEXT: movl $-1, %eax
-; X86-NOSSE-NEXT: jae .LBB0_1
-; X86-NOSSE-NEXT: # BB#2:
-; X86-NOSSE-NEXT: je .LBB0_3
-; X86-NOSSE-NEXT: .LBB0_4:
-; X86-NOSSE-NEXT: retl
-; X86-NOSSE-NEXT: .LBB0_1:
-; X86-NOSSE-NEXT: movl $1, %eax
-; X86-NOSSE-NEXT: jne .LBB0_4
-; X86-NOSSE-NEXT: .LBB0_3:
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length2:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: movzwl (%ecx), %ecx
-; X86-SSE2-NEXT: movzwl (%eax), %eax
-; X86-SSE2-NEXT: rolw $8, %cx
-; X86-SSE2-NEXT: rolw $8, %ax
-; X86-SSE2-NEXT: xorl %edx, %edx
-; X86-SSE2-NEXT: cmpw %ax, %cx
-; X86-SSE2-NEXT: movl $-1, %ecx
-; X86-SSE2-NEXT: movl $1, %eax
-; X86-SSE2-NEXT: cmovbl %ecx, %eax
-; X86-SSE2-NEXT: cmovel %edx, %eax
-; X86-SSE2-NEXT: retl
+; X86-LABEL: length2:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %ax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: cmpw %ax, %cx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: cmovel %edx, %eax
+; X86-NEXT: retl
;
; X64-LABEL: length2:
; X64: # BB#0:
@@ -137,44 +115,90 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
define i32 @length3(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length3:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $3
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl (%ecx), %esi
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: rolw $8, %si
+; X86-NEXT: movzwl %dx, %edx
+; X86-NEXT: movzwl %si, %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB4_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movzbl 2(%eax), %eax
+; X86-NEXT: movzbl 2(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB4_1: # %res_block
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: length3:
-; X64: # BB#0:
-; X64-NEXT: movl $3, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB4_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movzbl 2(%rdi), %eax
+; X64-NEXT: movzbl 2(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB4_1: # %res_block
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
ret i32 %m
}
define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length3_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $3
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: cmpw (%ecx), %dx
+; X86-NEXT: jne .LBB5_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movb 2(%eax), %dl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpb 2(%ecx), %dl
+; X86-NEXT: je .LBB5_3
+; X86-NEXT: .LBB5_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB5_3: # %endblock
; X86-NEXT: testl %eax, %eax
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: length3_eq:
-; X64: # BB#0:
-; X64-NEXT: pushq %rax
-; X64-NEXT: movl $3, %edx
-; X64-NEXT: callq memcmp
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: jne .LBB5_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movb 2(%rdi), %cl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpb 2(%rsi), %cl
+; X64-NEXT: je .LBB5_3
+; X64-NEXT: .LBB5_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB5_3: # %endblock
; X64-NEXT: testl %eax, %eax
; X64-NEXT: setne %al
-; X64-NEXT: popq %rcx
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
%c = icmp ne i32 %m, 0
@@ -182,43 +206,21 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length4(i8* %X, i8* %Y) nounwind {
-; X86-NOSSE-LABEL: length4:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT: movl (%ecx), %ecx
-; X86-NOSSE-NEXT: movl (%eax), %eax
-; X86-NOSSE-NEXT: bswapl %ecx
-; X86-NOSSE-NEXT: bswapl %eax
-; X86-NOSSE-NEXT: cmpl %eax, %ecx
-; X86-NOSSE-NEXT: movl $-1, %eax
-; X86-NOSSE-NEXT: jae .LBB6_1
-; X86-NOSSE-NEXT: # BB#2:
-; X86-NOSSE-NEXT: je .LBB6_3
-; X86-NOSSE-NEXT: .LBB6_4:
-; X86-NOSSE-NEXT: retl
-; X86-NOSSE-NEXT: .LBB6_1:
-; X86-NOSSE-NEXT: movl $1, %eax
-; X86-NOSSE-NEXT: jne .LBB6_4
-; X86-NOSSE-NEXT: .LBB6_3:
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length4:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: movl (%ecx), %ecx
-; X86-SSE2-NEXT: movl (%eax), %eax
-; X86-SSE2-NEXT: bswapl %ecx
-; X86-SSE2-NEXT: bswapl %eax
-; X86-SSE2-NEXT: xorl %edx, %edx
-; X86-SSE2-NEXT: cmpl %eax, %ecx
-; X86-SSE2-NEXT: movl $-1, %ecx
-; X86-SSE2-NEXT: movl $1, %eax
-; X86-SSE2-NEXT: cmovbl %ecx, %eax
-; X86-SSE2-NEXT: cmovel %edx, %eax
-; X86-SSE2-NEXT: retl
+; X86-LABEL: length4:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: cmovel %edx, %eax
+; X86-NEXT: retl
;
; X64-LABEL: length4:
; X64: # BB#0:
@@ -278,44 +280,86 @@ define i1 @length4_eq_const(i8* %X) nounwind {
define i32 @length5(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length5:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $5
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl (%ecx), %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB9_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movzbl 4(%eax), %eax
+; X86-NEXT: movzbl 4(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1: # %res_block
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: length5:
-; X64: # BB#0:
-; X64-NEXT: movl $5, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB9_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movzbl 4(%rdi), %eax
+; X64-NEXT: movzbl 4(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1: # %res_block
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
ret i32 %m
}
define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length5_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $5
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: cmpl (%ecx), %edx
+; X86-NEXT: jne .LBB10_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movb 4(%eax), %dl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpb 4(%ecx), %dl
+; X86-NEXT: je .LBB10_3
+; X86-NEXT: .LBB10_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB10_3: # %endblock
; X86-NEXT: testl %eax, %eax
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: length5_eq:
-; X64: # BB#0:
-; X64-NEXT: pushq %rax
-; X64-NEXT: movl $5, %edx
-; X64-NEXT: callq memcmp
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: jne .LBB10_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movb 4(%rdi), %cl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpb 4(%rsi), %cl
+; X64-NEXT: je .LBB10_3
+; X64-NEXT: .LBB10_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB10_3: # %endblock
; X64-NEXT: testl %eax, %eax
; X64-NEXT: setne %al
-; X64-NEXT: popq %rcx
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
%c = icmp ne i32 %m, 0
@@ -324,13 +368,33 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
define i32 @length8(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length8:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $8
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB11_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB11_1
+; X86-NEXT: # BB#3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB11_1: # %res_block
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: length8:
@@ -352,13 +416,20 @@ define i32 @length8(i8* %X, i8* %Y) nounwind {
define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length8_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $8
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: cmpl (%ecx), %edx
+; X86-NEXT: jne .LBB12_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl 4(%ecx), %edx
+; X86-NEXT: je .LBB12_3
+; X86-NEXT: .LBB12_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB12_3: # %endblock
; X86-NEXT: testl %eax, %eax
; X86-NEXT: sete %al
; X86-NEXT: retl
@@ -376,13 +447,17 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
define i1 @length8_eq_const(i8* %X) nounwind {
; X86-LABEL: length8_eq_const:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $8
-; X86-NEXT: pushl $.L.str
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $858927408, (%ecx) # imm = 0x33323130
+; X86-NEXT: jne .LBB13_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $926299444, 4(%ecx) # imm = 0x37363534
+; X86-NEXT: je .LBB13_3
+; X86-NEXT: .LBB13_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB13_3: # %endblock
; X86-NEXT: testl %eax, %eax
; X86-NEXT: setne %al
; X86-NEXT: retl
@@ -400,25 +475,43 @@ define i1 @length8_eq_const(i8* %X) nounwind {
define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length12_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $12
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
-; X86-NEXT: testl %eax, %eax
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: cmpl (%eax), %edx
+; X86-NEXT: jne .LBB14_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%ecx), %edx
+; X86-NEXT: cmpl 4(%eax), %edx
+; X86-NEXT: jne .LBB14_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%ecx), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpl 8(%eax), %edx
+; X86-NEXT: je .LBB14_4
+; X86-NEXT: .LBB14_1: # %res_block
+; X86-NEXT: movl $1, %ecx
+; X86-NEXT: .LBB14_4: # %endblock
+; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: length12_eq:
-; X64: # BB#0:
-; X64-NEXT: pushq %rax
-; X64-NEXT: movl $12, %edx
-; X64-NEXT: callq memcmp
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB14_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl 8(%rsi), %ecx
+; X64-NEXT: je .LBB14_3
+; X64-NEXT: .LBB14_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB14_3: # %endblock
; X64-NEXT: testl %eax, %eax
; X64-NEXT: setne %al
-; X64-NEXT: popq %rcx
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
%c = icmp ne i32 %m, 0
@@ -427,19 +520,66 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
define i32 @length12(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length12:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $12
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB15_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB15_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%esi), %ecx
+; X86-NEXT: movl 8(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB15_1
+; X86-NEXT: # BB#4: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB15_1: # %res_block
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: length12:
-; X64: # BB#0:
-; X64-NEXT: movl $12, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: movl 8(%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_1
+; X64-NEXT: # BB#3: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB15_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
ret i32 %m
}
@@ -448,111 +588,165 @@ define i32 @length12(i8* %X, i8* %Y) nounwind {
define i32 @length16(i8* %X, i8* %Y) nounwind {
; X86-LABEL: length16:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $16
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%esi), %ecx
+; X86-NEXT: movl 8(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#4: # %loadbb3
+; X86-NEXT: movl 12(%esi), %ecx
+; X86-NEXT: movl 12(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#5: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB16_1: # %res_block
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: length16:
-; X64: # BB#0:
-; X64-NEXT: movl $16, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # BB#3: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB16_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
ret i32 %m
}
define i1 @length16_eq(i8* %x, i8* %y) nounwind {
-; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: pushl $0
-; X86-NOSSE-NEXT: pushl $16
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: calll memcmp
-; X86-NOSSE-NEXT: addl $16, %esp
-; X86-NOSSE-NEXT: testl %eax, %eax
-; X86-NOSSE-NEXT: setne %al
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X86-SSE2-NEXT: movdqu (%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
-; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X86-SSE2-NEXT: setne %al
-; X86-SSE2-NEXT: retl
-;
-; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: movdqu (%rsi), %xmm0
-; X64-SSE2-NEXT: movdqu (%rdi), %xmm1
-; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
-; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT: setne %al
-; X64-SSE2-NEXT: retq
+; X86-LABEL: length16_eq:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: cmpl (%eax), %edx
+; X86-NEXT: jne .LBB17_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%ecx), %edx
+; X86-NEXT: cmpl 4(%eax), %edx
+; X86-NEXT: jne .LBB17_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%ecx), %edx
+; X86-NEXT: cmpl 8(%eax), %edx
+; X86-NEXT: jne .LBB17_1
+; X86-NEXT: # BB#4: # %loadbb3
+; X86-NEXT: movl 12(%ecx), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpl 12(%eax), %edx
+; X86-NEXT: je .LBB17_5
+; X86-NEXT: .LBB17_1: # %res_block
+; X86-NEXT: movl $1, %ecx
+; X86-NEXT: .LBB17_5: # %endblock
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
-; X64-AVX2-LABEL: length16_eq:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
-; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
-; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-AVX2-NEXT: setne %al
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length16_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB17_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq 8(%rsi), %rcx
+; X64-NEXT: je .LBB17_3
+; X64-NEXT: .LBB17_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB17_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
%cmp = icmp ne i32 %call, 0
ret i1 %cmp
}
define i1 @length16_eq_const(i8* %X) nounwind {
-; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: pushl $0
-; X86-NOSSE-NEXT: pushl $16
-; X86-NOSSE-NEXT: pushl $.L.str
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: calll memcmp
-; X86-NOSSE-NEXT: addl $16, %esp
-; X86-NOSSE-NEXT: testl %eax, %eax
-; X86-NOSSE-NEXT: sete %al
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
-; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
-; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X86-SSE2-NEXT: sete %al
-; X86-SSE2-NEXT: retl
-;
-; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
-; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
-; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
-; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT: sete %al
-; X64-SSE2-NEXT: retq
+; X86-LABEL: length16_eq_const:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $858927408, (%eax) # imm = 0x33323130
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: cmpl $926299444, 4(%eax) # imm = 0x37363534
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: cmpl $825243960, 8(%eax) # imm = 0x31303938
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # BB#4: # %loadbb3
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpl $892613426, 12(%eax) # imm = 0x35343332
+; X86-NEXT: je .LBB18_5
+; X86-NEXT: .LBB18_1: # %res_block
+; X86-NEXT: movl $1, %ecx
+; X86-NEXT: .LBB18_5: # %endblock
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
-; X64-AVX2-LABEL: length16_eq_const:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
-; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
-; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-AVX2-NEXT: sete %al
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length16_eq_const:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: jne .LBB18_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: movabsq $3833745473465760056, %rcx # imm = 0x3534333231303938
+; X64-NEXT: cmpq %rcx, 8(%rdi)
+; X64-NEXT: je .LBB18_3
+; X64-NEXT: .LBB18_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB18_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
%c = icmp eq i32 %m, 0
ret i1 %c
@@ -570,9 +764,43 @@ define i32 @length32(i8* %X, i8* %Y) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: length32:
-; X64: # BB#0:
-; X64-NEXT: movl $32, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#3: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rcx
+; X64-NEXT: movq 16(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#4: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rcx
+; X64-NEXT: movq 24(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#5: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB19_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
ret i32 %m
}
@@ -592,25 +820,30 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
; X86-NEXT: sete %al
; X86-NEXT: retl
;
-; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: pushq %rax
-; X64-SSE2-NEXT: movl $32, %edx
-; X64-SSE2-NEXT: callq memcmp
-; X64-SSE2-NEXT: testl %eax, %eax
-; X64-SSE2-NEXT: sete %al
-; X64-SSE2-NEXT: popq %rcx
-; X64-SSE2-NEXT: retq
-;
-; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
-; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
-; X64-AVX2-NEXT: cmpl $-1, %eax
-; X64-AVX2-NEXT: sete %al
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length32_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB20_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rax
+; X64-NEXT: cmpq 8(%rsi), %rax
+; X64-NEXT: jne .LBB20_1
+; X64-NEXT: # BB#3: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: cmpq 16(%rsi), %rax
+; X64-NEXT: jne .LBB20_1
+; X64-NEXT: # BB#4: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rcx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq 24(%rsi), %rcx
+; X64-NEXT: je .LBB20_5
+; X64-NEXT: .LBB20_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB20_5: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
@@ -629,26 +862,30 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-NEXT: setne %al
; X86-NEXT: retl
;
-; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: pushq %rax
-; X64-SSE2-NEXT: movl $.L.str, %esi
-; X64-SSE2-NEXT: movl $32, %edx
-; X64-SSE2-NEXT: callq memcmp
-; X64-SSE2-NEXT: testl %eax, %eax
-; X64-SSE2-NEXT: setne %al
-; X64-SSE2-NEXT: popq %rcx
-; X64-SSE2-NEXT: retq
-;
-; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
-; X64-AVX2-NEXT: cmpl $-1, %eax
-; X64-AVX2-NEXT: setne %al
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length32_eq_const:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: jne .LBB21_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movabsq $3833745473465760056, %rax # imm = 0x3534333231303938
+; X64-NEXT: cmpq %rax, 8(%rdi)
+; X64-NEXT: jne .LBB21_1
+; X64-NEXT: # BB#3: # %loadbb2
+; X64-NEXT: movabsq $3689065127958034230, %rax # imm = 0x3332313039383736
+; X64-NEXT: cmpq %rax, 16(%rdi)
+; X64-NEXT: jne .LBB21_1
+; X64-NEXT: # BB#4: # %loadbb3
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: movabsq $3544395820347831604, %rcx # imm = 0x3130393837363534
+; X64-NEXT: cmpq %rcx, 24(%rdi)
+; X64-NEXT: je .LBB21_5
+; X64-NEXT: .LBB21_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB21_5: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
%c = icmp ne i32 %m, 0
ret i1 %c
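
One detail worth calling out in the _eq tests above: when only the boolean memcmp(...) == 0 result is consumed, the expansion drops the byte swaps entirely, because two words are equal in memory order exactly when they are equal as integers (this is why length8_eq lowers to a single movq/cmpq/sete). A minimal sketch of that equality-only shape for length 8, in the same typed-pointer IR style as the tests (illustrative names, not the pass's literal output):

define i1 @memcmp8_eq_expanded(i8* %X, i8* %Y) {
  ; no bswap: an 8-byte region compares equal iff the i64 loads match
  %px = bitcast i8* %X to i64*
  %py = bitcast i8* %Y to i64*
  %lx = load i64, i64* %px
  %ly = load i64, i64* %py
  %c = icmp eq i64 %lx, %ly
  ret i1 %c
}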
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 50a661fcca114..76d750855cd4f 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -105,7 +105,7 @@ define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
;
; AVX-LABEL: mul_v4i32c:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [117,117,117,117]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -523,7 +523,7 @@ define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
;
; AVX-LABEL: mul_v8i32c:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117]
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -551,7 +551,7 @@ define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
;
; AVX-LABEL: mul_v4i64c:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [117,117,117,117]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
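
The pmul.ll hunks above change only the FileCheck patterns: the x86 assembly printer now decodes constant-pool broadcasts into a comment (e.g. xmm1 = [117,117,117,117]), so the checks can match the splat contents instead of an opaque {{.*}}(%rip) memory operand. For context, a function of the kind being exercised, multiplying by the 117 splat that appears in the updated checks (reconstructed from the check lines, so treat it as a sketch):

define <4 x i32> @mul_v4i32c(<4 x i32> %i) {
entry:
  ; with AVX2 this lowers to vpbroadcastd of the 117 constant from the
  ; constant pool followed by vpmulld, as matched in the checks above
  %m = mul <4 x i32> %i, <i32 117, i32 117, i32 117, i32 117>
  ret <4 x i32> %m
}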
diff --git a/test/CodeGen/X86/popcnt-schedule.ll b/test/CodeGen/X86/popcnt-schedule.ll
new file mode 100644
index 0000000000000..c0d11280fc1da
--- /dev/null
+++ b/test/CodeGen/X86/popcnt-schedule.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+popcnt | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=slm | FileCheck %s --check-prefix=CHECK --check-prefix=SLM
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=goldmont | FileCheck %s --check-prefix=CHECK --check-prefix=SLM
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=ivybridge | FileCheck %s --check-prefix=CHECK --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
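+; The "sched: [N:M.MM]" annotations printed by -print-schedule give each
+; instruction's latency and reciprocal throughput from the scheduling model.
+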
+define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
+; GENERIC-LABEL: test_ctpop_i16:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: popcntw (%rsi), %cx
+; GENERIC-NEXT: popcntw %di, %ax
+; GENERIC-NEXT: orl %ecx, %eax
+; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: retq
+;
+; SLM-LABEL: test_ctpop_i16:
+; SLM: # BB#0:
+; SLM-NEXT: popcntw (%rsi), %cx # sched: [6:1.00]
+; SLM-NEXT: popcntw %di, %ax # sched: [3:1.00]
+; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; SLM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_ctpop_i16:
+; SANDY: # BB#0:
+; SANDY-NEXT: popcntw (%rsi), %cx # sched: [7:1.00]
+; SANDY-NEXT: popcntw %di, %ax # sched: [3:1.00]
+; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
+; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_ctpop_i16:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: popcntw (%rsi), %cx # sched: [7:1.00]
+; HASWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
+; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_ctpop_i16:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
+; BTVER2-NEXT: popcntw %di, %ax # sched: [3:1.00]
+; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ctpop_i16:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: popcntw (%rsi), %cx # sched: [10:1.00]
+; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
+; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i16, i16 *%a1
+ %2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
+ %3 = tail call i16 @llvm.ctpop.i16( i16 %a0 )
+ %4 = or i16 %2, %3
+ ret i16 %4
+}
+declare i16 @llvm.ctpop.i16(i16)
+
+define i32 @test_ctpop_i32(i32 %a0, i32 *%a1) {
+; GENERIC-LABEL: test_ctpop_i32:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: popcntl (%rsi), %ecx
+; GENERIC-NEXT: popcntl %edi, %eax
+; GENERIC-NEXT: orl %ecx, %eax
+; GENERIC-NEXT: retq
+;
+; SLM-LABEL: test_ctpop_i32:
+; SLM: # BB#0:
+; SLM-NEXT: popcntl (%rsi), %ecx # sched: [6:1.00]
+; SLM-NEXT: popcntl %edi, %eax # sched: [3:1.00]
+; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_ctpop_i32:
+; SANDY: # BB#0:
+; SANDY-NEXT: popcntl (%rsi), %ecx # sched: [7:1.00]
+; SANDY-NEXT: popcntl %edi, %eax # sched: [3:1.00]
+; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_ctpop_i32:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: popcntl (%rsi), %ecx # sched: [7:1.00]
+; HASWELL-NEXT: popcntl %edi, %eax # sched: [3:1.00]
+; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_ctpop_i32:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: popcntl (%rsi), %ecx # sched: [8:1.00]
+; BTVER2-NEXT: popcntl %edi, %eax # sched: [3:1.00]
+; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ctpop_i32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: popcntl (%rsi), %ecx # sched: [10:1.00]
+; ZNVER1-NEXT: popcntl %edi, %eax # sched: [3:1.00]
+; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i32, i32 *%a1
+ %2 = tail call i32 @llvm.ctpop.i32( i32 %1 )
+ %3 = tail call i32 @llvm.ctpop.i32( i32 %a0 )
+ %4 = or i32 %2, %3
+ ret i32 %4
+}
+declare i32 @llvm.ctpop.i32(i32)
+
+define i64 @test_ctpop_i64(i64 %a0, i64 *%a1) {
+; GENERIC-LABEL: test_ctpop_i64:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: popcntq (%rsi), %rcx
+; GENERIC-NEXT: popcntq %rdi, %rax
+; GENERIC-NEXT: orq %rcx, %rax
+; GENERIC-NEXT: retq
+;
+; SLM-LABEL: test_ctpop_i64:
+; SLM: # BB#0:
+; SLM-NEXT: popcntq (%rsi), %rcx # sched: [6:1.00]
+; SLM-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
+; SLM-NEXT: orq %rcx, %rax # sched: [1:0.50]
+; SLM-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: test_ctpop_i64:
+; SANDY: # BB#0:
+; SANDY-NEXT: popcntq (%rsi), %rcx # sched: [9:1.00]
+; SANDY-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
+; SANDY-NEXT: orq %rcx, %rax # sched: [1:0.33]
+; SANDY-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_ctpop_i64:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: popcntq (%rsi), %rcx # sched: [7:1.00]
+; HASWELL-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
+; HASWELL-NEXT: orq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; BTVER2-LABEL: test_ctpop_i64:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: popcntq (%rsi), %rcx # sched: [8:1.00]
+; BTVER2-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
+; BTVER2-NEXT: orq %rcx, %rax # sched: [1:0.50]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ctpop_i64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: popcntq (%rsi), %rcx # sched: [10:1.00]
+; ZNVER1-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
+; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
+ %1 = load i64, i64 *%a1
+ %2 = tail call i64 @llvm.ctpop.i64( i64 %1 )
+ %3 = tail call i64 @llvm.ctpop.i64( i64 %a0 )
+ %4 = or i64 %2, %3
+ ret i64 %4
+}
+declare i64 @llvm.ctpop.i64(i64)
diff --git a/test/CodeGen/X86/pr32282.ll b/test/CodeGen/X86/pr32282.ll
new file mode 100644
index 0000000000000..26c4bdb2375ab
--- /dev/null
+++ b/test/CodeGen/X86/pr32282.ll
@@ -0,0 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X64
+
+; Check for an assert in foldMaskAndShiftToScale due to out-of-range mask scaling.
+
+@b = common global i8 zeroinitializer, align 1
+@c = common global i8 zeroinitializer, align 1
+@d = common global i64 zeroinitializer, align 8
+@e = common global i64 zeroinitializer, align 8
+
+define void @foo() {
+; X86-LABEL: foo:
+; X86: # BB#0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: movl d, %eax
+; X86-NEXT: movl d+4, %ecx
+; X86-NEXT: movl $701685459, %edx # imm = 0x29D2DED3
+; X86-NEXT: andnl %edx, %ecx, %ecx
+; X86-NEXT: movl $-564453154, %edx # imm = 0xDE5B20DE
+; X86-NEXT: andnl %edx, %eax, %edx
+; X86-NEXT: shrdl $21, %ecx, %edx
+; X86-NEXT: shrl $21, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb %al, %al
+; X86-NEXT: cmovnel %ecx, %edx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: andl $-2, %edx
+; X86-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; X86-NEXT: addl $7, %edx
+; X86-NEXT: adcxl %eax, %ecx
+; X86-NEXT: pushl %ecx
+; X86-NEXT: .Lcfi1:
+; X86-NEXT: .cfi_adjust_cfa_offset 4
+; X86-NEXT: pushl %edx
+; X86-NEXT: .Lcfi2:
+; X86-NEXT: .cfi_adjust_cfa_offset 4
+; X86-NEXT: pushl $0
+; X86-NEXT: .Lcfi3:
+; X86-NEXT: .cfi_adjust_cfa_offset 4
+; X86-NEXT: pushl $0
+; X86-NEXT: .Lcfi4:
+; X86-NEXT: .cfi_adjust_cfa_offset 4
+; X86-NEXT: calll __divdi3
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: .Lcfi5:
+; X86-NEXT: .cfi_adjust_cfa_offset -16
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: setne {{[0-9]+}}(%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: foo:
+; X64: # BB#0:
+; X64-NEXT: movq {{.*}}(%rip), %rax
+; X64-NEXT: movabsq $3013716102212485120, %rcx # imm = 0x29D2DED3DE400000
+; X64-NEXT: andnq %rcx, %rax, %rcx
+; X64-NEXT: shrq $21, %rcx
+; X64-NEXT: addq $7, %rcx
+; X64-NEXT: movabsq $4393751543808, %rax # imm = 0x3FF00000000
+; X64-NEXT: testq %rax, %rcx
+; X64-NEXT: je .LBB0_1
+; X64-NEXT: # BB#2:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: idivq %rcx
+; X64-NEXT: jmp .LBB0_3
+; X64-NEXT: .LBB0_1:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<def>
+; X64-NEXT: .LBB0_3:
+; X64-NEXT: testq %rax, %rax
+; X64-NEXT: setne -{{[0-9]+}}(%rsp)
+; X64-NEXT: retq
+ %1 = alloca i8, align 1
+ %2 = load i64, i64* @d, align 8
+ %3 = or i64 -3013716102214263007, %2
+ %4 = xor i64 %3, -1
+ %5 = load i64, i64* @e, align 8
+ %6 = load i8, i8* @b, align 1
+ %7 = trunc i8 %6 to i1
+ %8 = zext i1 %7 to i64
+ %9 = xor i64 %5, %8
+ %10 = load i8, i8* @c, align 1
+ %11 = trunc i8 %10 to i1
+ %12 = zext i1 %11 to i32
+ %13 = or i32 551409149, %12
+ %14 = sub nsw i32 %13, 551409131
+ %15 = zext i32 %14 to i64
+ %16 = shl i64 %9, %15
+ %17 = sub nsw i64 %16, 223084523
+ %18 = ashr i64 %4, %17
+ %19 = and i64 %18, 9223372036854775806
+ %20 = add nsw i64 7, %19
+ %21 = sdiv i64 0, %20
+ %22 = icmp ne i64 %21, 0
+ %23 = zext i1 %22 to i8
+ store i8 %23, i8* %1, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32515.ll b/test/CodeGen/X86/pr32515.ll
new file mode 100644
index 0000000000000..aeb6803867aaa
--- /dev/null
+++ b/test/CodeGen/X86/pr32515.ll
@@ -0,0 +1,32 @@
+; RUN: llc -O0 -mtriple=x86_64-unknown -mcpu=skx -o - %s
+; RUN: llc -mtriple=x86_64-unknown -mcpu=skx -o - %s
+; RUN: llc -O0 -mtriple=i686-unknown -mcpu=skx -o - %s
+; RUN: llc -mtriple=i686-unknown -mcpu=skx -o - %s
+; REQUIRES: asserts
+
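+; Reduced from PR32515: the RUN lines above only verify that llc does not
+; crash or hit an assertion; there are no FileCheck assertions.
+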
+@var_26 = external global i16, align 2
+
+define void @foo() #0 {
+ %1 = alloca i16, align 2
+ %2 = load i16, i16* @var_26, align 2
+ %3 = zext i16 %2 to i32
+ %4 = icmp ne i32 %3, 7
+ %5 = zext i1 %4 to i16
+ store i16 %5, i16* %1, align 2
+ %6 = load i16, i16* @var_26, align 2
+ %7 = zext i16 %6 to i32
+ %8 = and i32 1, %7
+ %9 = shl i32 %8, 0
+ %10 = load i16, i16* @var_26, align 2
+ %11 = zext i16 %10 to i32
+ %12 = icmp ne i32 %11, 7
+ %13 = zext i1 %12 to i32
+ %14 = and i32 %9, %13
+ %15 = icmp ne i32 %14, 0
+ %16 = zext i1 %15 to i8
+ store i8 %16, i8* undef, align 1
+ unreachable
+ }
diff --git a/test/CodeGen/X86/pr33772.ll b/test/CodeGen/X86/pr33772.ll
new file mode 100644
index 0000000000000..ff22c7478866b
--- /dev/null
+++ b/test/CodeGen/X86/pr33772.ll
@@ -0,0 +1,15 @@
+; RUN: not llc < %s -mcpu=skylake-avx512 2>&1 | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+; Make sure we don't crash if the scale for the gather isn't constant.
+
+; CHECK: LLVM ERROR: Cannot select: intrinsic %llvm.x86.avx512.gather.dpi.512
+declare <16 x i32> @llvm.x86.avx512.gather.dpi.512(<16 x i32>, i8*, <16 x i32>, i16, i32)
+
+define internal <16 x i32> @__gather_base_offsets32_i32(i8* readonly %ptr, i32 %offset_scale, <16 x i32> %offsets, <16 x i8> %vecmask) {
+ %mask_vec_i1.i.i = icmp ne <16 x i8> %vecmask, zeroinitializer
+ %mask_i16.i = bitcast <16 x i1> %mask_vec_i1.i.i to i16
+ %res = tail call <16 x i32> @llvm.x86.avx512.gather.dpi.512(<16 x i32> undef, i8* %ptr, <16 x i32> %offsets, i16 %mask_i16.i, i32 %offset_scale)
+ ret <16 x i32> %res
+}
diff --git a/test/CodeGen/X86/pr33828.ll b/test/CodeGen/X86/pr33828.ll
new file mode 100644
index 0000000000000..1b7f44323b612
--- /dev/null
+++ b/test/CodeGen/X86/pr33828.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=haswell | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell | FileCheck %s --check-prefix=X64
+
+@var_580 = external local_unnamed_addr global i8, align 1
+
+define void @foo() {
+; X86-LABEL: foo:
+; X86: # BB#0: # %entry
+; X86-NEXT: movsbl var_580, %eax
+; X86-NEXT: testl $-536870913, %eax # imm = 0xDFFFFFFF
+; X86-NEXT: jne .LBB0_1
+; X86-NEXT: # BB#2: # %if.end13
+; X86-NEXT: retl
+; X86-NEXT: .LBB0_1: # %if.then11
+;
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: movsbl {{.*}}(%rip), %eax
+; X64-NEXT: testl $-536870913, %eax # imm = 0xDFFFFFFF
+; X64-NEXT: jne .LBB0_1
+; X64-NEXT: # BB#2: # %if.end13
+; X64-NEXT: retq
+; X64-NEXT: .LBB0_1: # %if.then11
+entry:
+ %tmp = icmp ugt i8 undef, 60
+ %phitmp = zext i1 %tmp to i16
+ br label %if.end
+
+if.end:
+ %tmp1 = load i8, i8* @var_580, align 1
+ %conv7 = sext i8 %tmp1 to i32
+ %conv8 = zext i16 %phitmp to i32
+ %mul = shl nuw nsw i32 %conv8, 1
+ %div9 = udiv i32 %mul, 71
+ %sub = add nsw i32 %div9, -3
+ %shl = shl i32 1, %sub
+ %neg = xor i32 %shl, -1
+ %and = and i32 %neg, %conv7
+ %tobool10 = icmp eq i32 %and, 0
+ br i1 %tobool10, label %if.end13, label %if.then11
+
+if.then11:
+ unreachable
+
+if.end13:
+ ret void
+}
diff --git a/test/CodeGen/X86/regparm.ll b/test/CodeGen/X86/regparm.ll
index 9484e5a9490bd..f427010edc516 100644
--- a/test/CodeGen/X86/regparm.ll
+++ b/test/CodeGen/X86/regparm.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -mtriple=i386-pc-linux -o - | FileCheck -check-prefix=CHECK %s
+; RUN: llc %s -mtriple=i386-pc-linux -o - | FileCheck %s
; RUN: llc %s -mtriple=i386-pc-win32 -o - | FileCheck -check-prefix=WIN %s
; RUN: llc %s -mtriple=i386-pc-linux -fast-isel -o - | FileCheck -check-prefix=FAST %s
; RUN: llc %s -mtriple=i386-pc-win32 -fast-isel -o - | FileCheck -check-prefix=FASTWIN %s
diff --git a/test/CodeGen/X86/rotate_vec.ll b/test/CodeGen/X86/rotate_vec.ll
new file mode 100644
index 0000000000000..8fb000bae827d
--- /dev/null
+++ b/test/CodeGen/X86/rotate_vec.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=bdver4 | FileCheck %s
+
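+; XOP (bdver4) provides vprotd; a splat rotate amount selects the immediate
+; form, while non-splat amounts load the rotate vector from the constant pool.
+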
+define <4 x i32> @rot_v4i32_splat(<4 x i32> %x) {
+; CHECK-LABEL: rot_v4i32_splat:
+; CHECK: # BB#0:
+; CHECK-NEXT: vprotd $31, %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
+ %3 = or <4 x i32> %1, %2
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @rot_v4i32_non_splat(<4 x i32> %x) {
+; CHECK-LABEL: rot_v4i32_non_splat:
+; CHECK: # BB#0:
+; CHECK-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+ %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
+ %3 = or <4 x i32> %1, %2
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @rot_v4i32_splat_2masks(<4 x i32> %x) {
+; CHECK-LABEL: rot_v4i32_splat_2masks:
+; CHECK: # BB#0:
+; CHECK-NEXT: vprotd $31, %xmm0, %xmm0
+; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %2 = and <4 x i32> %1, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
+
+ %3 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
+ %4 = and <4 x i32> %3, <i32 0, i32 4294901760, i32 0, i32 4294901760>
+ %5 = or <4 x i32> %2, %4
+ ret <4 x i32> %5
+}
+
+define <4 x i32> @rot_v4i32_non_splat_2masks(<4 x i32> %x) {
+; CHECK-LABEL: rot_v4i32_non_splat_2masks:
+; CHECK: # BB#0:
+; CHECK-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+ %2 = and <4 x i32> %1, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
+
+ %3 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
+ %4 = and <4 x i32> %3, <i32 0, i32 4294901760, i32 0, i32 4294901760>
+ %5 = or <4 x i32> %2, %4
+ ret <4 x i32> %5
+}
diff --git a/test/CodeGen/X86/sibcall-win64.ll b/test/CodeGen/X86/sibcall-win64.ll
index 204e1f8b050ba..b9d5a4813e09a 100644
--- a/test/CodeGen/X86/sibcall-win64.ll
+++ b/test/CodeGen/X86/sibcall-win64.ll
@@ -1,15 +1,15 @@
; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
-declare x86_64_win64cc void @win64_callee(i32)
-declare x86_64_win64cc void (i32)* @win64_indirect()
-declare x86_64_win64cc void @win64_other(i32)
+declare win64cc void @win64_callee(i32)
+declare win64cc void (i32)* @win64_indirect()
+declare win64cc void @win64_other(i32)
declare void @sysv_callee(i32)
declare void (i32)* @sysv_indirect()
declare void @sysv_other(i32)
define void @sysv_caller(i32 %p1) {
entry:
- tail call x86_64_win64cc void @win64_callee(i32 %p1)
+ tail call win64cc void @win64_callee(i32 %p1)
ret void
}
@@ -19,7 +19,7 @@ entry:
; CHECK: addq $40, %rsp
; CHECK: retq
-define x86_64_win64cc void @win64_caller(i32 %p1) {
+define win64cc void @win64_caller(i32 %p1) {
entry:
tail call void @sysv_callee(i32 %p1)
ret void
@@ -37,18 +37,18 @@ define void @sysv_matched(i32 %p1) {
; CHECK-LABEL: sysv_matched:
; CHECK: jmp sysv_callee # TAILCALL
-define x86_64_win64cc void @win64_matched(i32 %p1) {
- tail call x86_64_win64cc void @win64_callee(i32 %p1)
+define win64cc void @win64_matched(i32 %p1) {
+ tail call win64cc void @win64_callee(i32 %p1)
ret void
}
; CHECK-LABEL: win64_matched:
; CHECK: jmp win64_callee # TAILCALL
-define x86_64_win64cc void @win64_indirect_caller(i32 %p1) {
- %1 = call x86_64_win64cc void (i32)* @win64_indirect()
- call x86_64_win64cc void @win64_other(i32 0)
- tail call x86_64_win64cc void %1(i32 %p1)
+define win64cc void @win64_indirect_caller(i32 %p1) {
+ %1 = call win64cc void (i32)* @win64_indirect()
+ call win64cc void @win64_other(i32 0)
+ tail call win64cc void %1(i32 %p1)
ret void
}
diff --git a/test/CodeGen/X86/sse-schedule.ll b/test/CodeGen/X86/sse-schedule.ll
index c41acd43b3ab6..29f726c3df6a7 100644
--- a/test/CodeGen/X86/sse-schedule.ll
+++ b/test/CodeGen/X86/sse-schedule.ll
@@ -7,7 +7,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; GENERIC-LABEL: test_addps:
@@ -45,6 +45,12 @@ define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fadd <4 x float> %1, %2
@@ -87,6 +93,12 @@ define float @test_addss(float %a0, float %a1, float *%a2) {
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fadd float %1, %2
@@ -137,6 +149,12 @@ define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = and <4 x i32> %1, %2
@@ -191,6 +209,12 @@ define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; BTVER2-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andnotps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -245,6 +269,13 @@ define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cmpps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
+; ZNVER1-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fcmp oeq <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fcmp oeq <4 x float> %a0, %2
@@ -290,6 +321,12 @@ define float @test_cmpss(float %a0, float %a1, float *%a2) {
; BTVER2-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cmpss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = insertelement <4 x float> undef, float %a1, i32 0
%3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0)
@@ -385,6 +422,20 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50]
; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_comiss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: vcomiss (%rdi), %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %dl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 4
%3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2)
@@ -435,6 +486,13 @@ define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
; BTVER2-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsi2ss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp i32 %a0 to float
%2 = load i32, i32 *%a1, align 4
%3 = sitofp i32 %2 to float
@@ -484,6 +542,13 @@ define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
; BTVER2-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsi2ssq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp i64 %a0 to float
%2 = load i64, i64 *%a1, align 8
%3 = sitofp i64 %2 to float
@@ -533,6 +598,13 @@ define i32 @test_cvtss2si(float %a0, float *%a1) {
; BTVER2-NEXT: vcvtss2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtss2si:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtss2si (%rdi), %eax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1)
%3 = load float, float *%a1, align 4
@@ -585,6 +657,13 @@ define i64 @test_cvtss2siq(float %a0, float *%a1) {
; BTVER2-NEXT: vcvtss2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtss2siq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtss2si (%rdi), %rax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1)
%3 = load float, float *%a1, align 4
@@ -637,6 +716,13 @@ define i32 @test_cvttss2si(float %a0, float *%a1) {
; BTVER2-NEXT: vcvttss2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttss2si:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttss2si (%rdi), %eax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi float %a0 to i32
%2 = load float, float *%a1, align 4
%3 = fptosi float %2 to i32
@@ -686,6 +772,13 @@ define i64 @test_cvttss2siq(float %a0, float *%a1) {
; BTVER2-NEXT: vcvttss2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttss2siq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttss2si (%rdi), %rax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi float %a0 to i64
%2 = load float, float *%a1, align 4
%3 = fptosi float %2 to i64
@@ -729,6 +822,12 @@ define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_divps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fdiv <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fdiv <4 x float> %1, %2
@@ -771,6 +870,12 @@ define float @test_divss(float %a0, float %a1, float *%a2) {
; BTVER2-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_divss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fdiv float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fdiv float %1, %2
@@ -813,6 +918,12 @@ define void @test_ldmxcsr(i32 %a0) {
; BTVER2-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BTVER2-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ldmxcsr:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
+; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = alloca i32, align 4
%2 = bitcast i32* %1 to i8*
store i32 %a0, i32* %1
@@ -857,6 +968,12 @@ define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maxps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2)
@@ -900,6 +1017,12 @@ define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maxss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2)
@@ -943,6 +1066,12 @@ define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_minps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2)
@@ -986,6 +1115,12 @@ define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_minss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2)
@@ -1035,6 +1170,13 @@ define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movaps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovaps (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 16
%2 = fadd <4 x float> %1, %1
store <4 x float> %2, <4 x float> *%a1, align 16
@@ -1079,6 +1221,11 @@ define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movhlps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
ret <4 x float> %1
}
@@ -1129,6 +1276,13 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movhps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast x86_mmx* %a2 to <2 x float>*
%2 = load <2 x float>, <2 x float> *%1, align 8
%3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1177,6 +1331,12 @@ define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movlhps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%2 = fadd <4 x float> %a1, %1
ret <4 x float> %2
@@ -1224,6 +1384,13 @@ define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movlps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast x86_mmx* %a2 to <2 x float>*
%2 = load <2 x float>, <2 x float> *%1, align 8
%3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1266,6 +1433,11 @@ define i32 @test_movmskps(<4 x float> %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movmskps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
ret i32 %1
}
@@ -1307,6 +1479,11 @@ define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
ret void
}
@@ -1353,6 +1530,13 @@ define void @test_movss_mem(float* %a0, float* %a1) {
; BTVER2-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movss_mem:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovss %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load float, float* %a0, align 1
%2 = fadd float %1, %1
store float %2, float *%a1, align 1
@@ -1395,6 +1579,11 @@ define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movss_reg:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
ret <4 x float> %1
}
@@ -1441,6 +1630,13 @@ define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movups:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovups (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovups %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 1
%2 = fadd <4 x float> %1, %1
store <4 x float> %2, <4 x float> *%a1, align 1
@@ -1483,6 +1679,12 @@ define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mulps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fmul <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fmul <4 x float> %1, %2
@@ -1525,6 +1727,12 @@ define float @test_mulss(float %a0, float %a1, float *%a2) {
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mulss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fmul float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fmul float %1, %2
@@ -1575,6 +1783,12 @@ define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
; BTVER2-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_orps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = or <4 x i32> %1, %2
@@ -1621,6 +1835,11 @@ define void @test_prefetchnta(i8* %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: prefetchnta (%rdi) # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_prefetchnta:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: prefetchnta (%rdi) # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
ret void
}
@@ -1670,6 +1889,13 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vrcpps %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rcpps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vrcpps (%rdi), %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2)
@@ -1728,6 +1954,14 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; BTVER2-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rcpss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1)
%3 = load float, float *%a1, align 4
@@ -1782,6 +2016,13 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rsqrtps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2)
@@ -1840,6 +2081,14 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; BTVER2-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rsqrtss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1)
%3 = load float, float *%a1, align 4
@@ -1886,6 +2135,11 @@ define void @test_sfence() {
; BTVER2: # BB#0:
; BTVER2-NEXT: sfence # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sfence:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: sfence # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.x86.sse.sfence()
ret void
}
@@ -1931,6 +2185,12 @@ define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_shufps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
+; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 3, i32 4, i32 4>
@@ -1980,6 +2240,13 @@ define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vsqrtps %xmm0, %xmm0 # sched: [21:21.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sqrtps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2)
@@ -2038,6 +2305,14 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sqrtss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovaps (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2)
@@ -2082,6 +2357,12 @@ define i32 @test_stmxcsr() {
; BTVER2-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
; BTVER2-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_stmxcsr:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:0.50]
+; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = alloca i32, align 4
%2 = bitcast i32* %1 to i8*
call void @llvm.x86.sse.stmxcsr(i8* %2)
@@ -2126,6 +2407,12 @@ define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_subps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fsub <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fsub <4 x float> %1, %2
@@ -2168,6 +2455,12 @@ define float @test_subss(float %a0, float %a1, float *%a2) {
; BTVER2-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_subss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fsub float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fsub float %1, %2
@@ -2258,6 +2551,20 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50]
; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ucomiss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: vucomiss (%rdi), %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %dl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 4
%3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2)
@@ -2306,6 +2613,12 @@ define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_unpckhps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
+; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -2352,6 +2665,12 @@ define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_unpcklps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
+; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -2402,6 +2721,12 @@ define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; BTVER2-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_xorps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, %2
diff --git a/test/CodeGen/X86/sse2-schedule.ll b/test/CodeGen/X86/sse2-schedule.ll
index 3c36b21381390..6ee908e0c7871 100644
--- a/test/CodeGen/X86/sse2-schedule.ll
+++ b/test/CodeGen/X86/sse2-schedule.ll
@@ -7,7 +7,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
define <2 x double> @test_addpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_addpd:
@@ -45,6 +45,12 @@ define <2 x double> @test_addpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fadd <2 x double> %1, %2
@@ -87,6 +93,12 @@ define double @test_addsd(double %a0, double %a1, double *%a2) {
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fadd double %1, %2
@@ -135,6 +147,13 @@ define <2 x double> @test_andpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = and <4 x i32> %1, %2
@@ -188,6 +207,13 @@ define <2 x double> @test_andnotpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; BTVER2-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andnotpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -243,6 +269,13 @@ define <2 x double> @test_cmppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cmppd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
+; ZNVER1-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fcmp oeq <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fcmp oeq <2 x double> %a0, %2
@@ -288,6 +321,12 @@ define double @test_cmpsd(double %a0, double %a1, double *%a2) {
; BTVER2-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cmpsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <2 x double> undef, double %a0, i32 0
%2 = insertelement <2 x double> undef, double %a1, i32 0
%3 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %1, <2 x double> %2, i8 0)
@@ -383,6 +422,20 @@ define i32 @test_comisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50]
; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_comisd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcomisd %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: vcomisd (%rdi), %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %dl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 8
%3 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %2)
@@ -433,6 +486,13 @@ define <2 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
; BTVER2-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtdq2pd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = sitofp <2 x i32> %1 to <2 x double>
%3 = load <4 x i32>, <4 x i32>*%a1, align 16
@@ -485,6 +545,13 @@ define <4 x float> @test_cvtdq2ps(<4 x i32> %a0, <4 x i32> *%a1) {
; BTVER2-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtdq2ps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp <4 x i32> %a0 to <4 x float>
%2 = load <4 x i32>, <4 x i32>*%a1, align 16
%3 = sitofp <4 x i32> %2 to <4 x float>
@@ -535,6 +602,13 @@ define <4 x i32> @test_cvtpd2dq(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtpd2dq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %2)
@@ -586,6 +660,13 @@ define <4 x float> @test_cvtpd2ps(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtpd2ps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %2)
@@ -637,6 +718,13 @@ define <4 x i32> @test_cvtps2dq(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtps2dq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %2)
@@ -688,6 +776,13 @@ define <2 x double> @test_cvtps2pd(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtps2pd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
%2 = fpext <2 x float> %1 to <2 x double>
%3 = load <4 x float>, <4 x float> *%a1, align 16
@@ -739,6 +834,13 @@ define i32 @test_cvtsd2si(double %a0, double *%a1) {
; BTVER2-NEXT: vcvtsd2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsd2si:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsd2si (%rdi), %eax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtsd2si %xmm0, %ecx # sched: [5:1.00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <2 x double> undef, double %a0, i32 0
%2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %1)
%3 = load double, double *%a1, align 8
@@ -791,6 +893,13 @@ define i64 @test_cvtsd2siq(double %a0, double *%a1) {
; BTVER2-NEXT: vcvtsd2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsd2siq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsd2si (%rdi), %rax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtsd2si %xmm0, %rcx # sched: [5:1.00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <2 x double> undef, double %a0, i32 0
%2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %1)
%3 = load double, double *%a1, align 8
@@ -850,6 +959,14 @@ define float @test_cvtsd2ss(double %a0, double *%a1) {
; BTVER2-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [3:1.00]
; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsd2ss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
+; ZNVER1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptrunc double %a0 to float
%2 = load double, double *%a1, align 8
%3 = fptrunc double %2 to float
@@ -899,6 +1016,13 @@ define double @test_cvtsi2sd(i32 %a0, i32 *%a1) {
; BTVER2-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsi2sd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp i32 %a0 to double
%2 = load i32, i32 *%a1, align 8
%3 = sitofp i32 %2 to double
@@ -948,6 +1072,13 @@ define double @test_cvtsi2sdq(i64 %a0, i64 *%a1) {
; BTVER2-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsi2sdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sitofp i64 %a0 to double
%2 = load i64, i64 *%a1, align 8
%3 = sitofp i64 %2 to double
@@ -1006,6 +1137,14 @@ define double @test_cvtss2sd(float %a0, float *%a1) {
; BTVER2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [3:1.00]
; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtss2sd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
+; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fpext float %a0 to double
%2 = load float, float *%a1, align 4
%3 = fpext float %2 to double
@@ -1056,6 +1195,13 @@ define <4 x i32> @test_cvttpd2dq(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttpd2dq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi <2 x double> %a0 to <2 x i32>
%2 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%3 = load <2 x double>, <2 x double> *%a1, align 16
@@ -1108,6 +1254,13 @@ define <4 x i32> @test_cvttps2dq(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttps2dq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi <4 x float> %a0 to <4 x i32>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = fptosi <4 x float> %2 to <4 x i32>
@@ -1157,6 +1310,13 @@ define i32 @test_cvttsd2si(double %a0, double *%a1) {
; BTVER2-NEXT: vcvttsd2si %xmm0, %ecx # sched: [3:1.00]
; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttsd2si:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttsd2si (%rdi), %eax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttsd2si %xmm0, %ecx # sched: [5:1.00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi double %a0 to i32
%2 = load double, double *%a1, align 8
%3 = fptosi double %2 to i32
@@ -1206,6 +1366,13 @@ define i64 @test_cvttsd2siq(double %a0, double *%a1) {
; BTVER2-NEXT: vcvttsd2si %xmm0, %rcx # sched: [3:1.00]
; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttsd2siq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttsd2si (%rdi), %rax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttsd2si %xmm0, %rcx # sched: [5:1.00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fptosi double %a0 to i64
%2 = load double, double *%a1, align 8
%3 = fptosi double %2 to i64
@@ -1249,6 +1416,12 @@ define <2 x double> @test_divpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_divpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fdiv <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fdiv <2 x double> %1, %2
@@ -1291,6 +1464,12 @@ define double @test_divsd(double %a0, double %a1, double *%a2) {
; BTVER2-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_divsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fdiv double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fdiv double %1, %2
@@ -1333,6 +1512,11 @@ define void @test_lfence() {
; BTVER2: # BB#0:
; BTVER2-NEXT: lfence # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lfence:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: lfence # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.x86.sse2.lfence()
ret void
}
@@ -1374,6 +1558,11 @@ define void @test_mfence() {
; BTVER2: # BB#0:
; BTVER2-NEXT: mfence # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mfence:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: mfence # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.x86.sse2.mfence()
ret void
}
@@ -1413,6 +1602,11 @@ define void @test_maskmovdqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maskmovdqu:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2)
ret void
}
@@ -1454,6 +1648,12 @@ define <2 x double> @test_maxpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maxpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %1, <2 x double> %2)
@@ -1497,6 +1697,12 @@ define <2 x double> @test_maxsd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maxsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %1, <2 x double> %2)
@@ -1540,6 +1746,12 @@ define <2 x double> @test_minpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_minpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %1, <2 x double> %2)
@@ -1583,6 +1795,12 @@ define <2 x double> @test_minsd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_minsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %1, <2 x double> %2)
@@ -1632,6 +1850,13 @@ define void @test_movapd(<2 x double> *%a0, <2 x double> *%a1) {
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movapd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovapd (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <2 x double>, <2 x double> *%a0, align 16
%2 = fadd <2 x double> %1, %1
store <2 x double> %2, <2 x double> *%a1, align 16
@@ -1680,6 +1905,13 @@ define void @test_movdqa(<2 x i64> *%a0, <2 x i64> *%a1) {
; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movdqa:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovdqa (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <2 x i64>, <2 x i64> *%a0, align 16
%2 = add <2 x i64> %1, %1
store <2 x i64> %2, <2 x i64> *%a1, align 16
@@ -1728,6 +1960,13 @@ define void @test_movdqu(<2 x i64> *%a0, <2 x i64> *%a1) {
; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movdqu:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovdqu (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <2 x i64>, <2 x i64> *%a0, align 1
%2 = add <2 x i64> %1, %1
store <2 x i64> %2, <2 x i64> *%a1, align 1
@@ -1794,6 +2033,16 @@ define i32 @test_movd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; BTVER2-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovd %xmm0, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vmovd %edi, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovd %xmm1, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovd %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x i32> undef, i32 %a1, i32 0
%2 = load i32, i32 *%a2
%3 = insertelement <4 x i32> undef, i32 %2, i32 0
@@ -1865,6 +2114,16 @@ define i64 @test_movd_64(<2 x i64> %a0, i64 %a1, i64 *%a2) {
; BTVER2-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovq %xmm0, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movd_64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [8:0.50]
+; ZNVER1-NEXT: vmovq %rdi, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovq %xmm1, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovq %xmm0, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <2 x i64> undef, i64 %a1, i64 0
%2 = load i64, i64 *%a2
%3 = insertelement <2 x i64> undef, i64 %2, i64 0
@@ -1918,6 +2177,13 @@ define void @test_movhpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movhpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast x86_mmx* %a2 to double*
%2 = load double, double *%1, align 8
%3 = insertelement <2 x double> %a1, double %2, i32 1
@@ -1969,6 +2235,13 @@ define void @test_movlpd(<2 x double> %a0, <2 x double> %a1, x86_mmx *%a2) {
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movlpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast x86_mmx* %a2 to double*
%2 = load double, double *%1, align 8
%3 = insertelement <2 x double> %a1, double %2, i32 0
@@ -2010,6 +2283,11 @@ define i32 @test_movmskpd(<2 x double> %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movmskpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
ret i32 %1
}
@@ -2053,6 +2331,12 @@ define void @test_movntdqa(<2 x i64> %a0, <2 x i64> *%a1) {
; BTVER2-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntdqa:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <2 x i64> %a0, %a0
store <2 x i64> %1, <2 x i64> *%a1, align 16, !nontemporal !0
ret void
@@ -2094,6 +2378,12 @@ define void @test_movntpd(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fadd <2 x double> %a0, %a0
store <2 x double> %1, <2 x double> *%a1, align 16, !nontemporal !0
ret void
@@ -2141,6 +2431,13 @@ define <2 x i64> @test_movq_mem(<2 x i64> %a0, i64 *%a1) {
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vmovq %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movq_mem:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovq %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load i64, i64* %a1, align 1
%2 = insertelement <2 x i64> zeroinitializer, i64 %1, i32 0
%3 = add <2 x i64> %a0, %2
@@ -2187,6 +2484,12 @@ define <2 x i64> @test_movq_reg(<2 x i64> %a0, <2 x i64> %a1) {
; BTVER2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movq_reg:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
%2 = add <2 x i64> %a1, %1
ret <2 x i64> %2
@@ -2234,6 +2537,13 @@ define void @test_movsd_mem(double* %a0, double* %a1) {
; BTVER2-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movsd_mem:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [8:0.50]
+; ZNVER1-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load double, double* %a0, align 1
%2 = fadd double %1, %1
store double %2, double *%a1, align 1
@@ -2277,6 +2587,11 @@ define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movsd_reg:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 0>
ret <2 x double> %1
}
@@ -2323,6 +2638,13 @@ define void @test_movupd(<2 x double> *%a0, <2 x double> *%a1) {
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movupd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovupd (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <2 x double>, <2 x double> *%a0, align 1
%2 = fadd <2 x double> %1, %1
store <2 x double> %2, <2 x double> *%a1, align 1
@@ -2365,6 +2687,12 @@ define <2 x double> @test_mulpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mulpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fmul <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fmul <2 x double> %1, %2
@@ -2407,6 +2735,12 @@ define double @test_mulsd(double %a0, double %a1, double *%a2) {
; BTVER2-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mulsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fmul double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fmul double %1, %2
@@ -2455,6 +2789,13 @@ define <2 x double> @test_orpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_orpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = or <4 x i32> %1, %2
@@ -2510,6 +2851,12 @@ define <8 x i16> @test_packssdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_packssdw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -2562,6 +2909,12 @@ define <16 x i8> @test_packsswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_packsswb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = load <8 x i16>, <8 x i16> *%a2, align 16
@@ -2614,6 +2967,12 @@ define <16 x i8> @test_packuswb(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_packuswb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = load <8 x i16>, <8 x i16> *%a2, align 16
@@ -2662,6 +3021,12 @@ define <16 x i8> @test_paddb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = add <16 x i8> %1, %2
@@ -2708,6 +3073,12 @@ define <4 x i32> @test_paddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = add <4 x i32> %1, %2
@@ -2750,6 +3121,12 @@ define <2 x i64> @test_paddq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = add <2 x i64> %1, %2
@@ -2796,6 +3173,12 @@ define <16 x i8> @test_paddsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %1, <16 x i8> %2)
@@ -2843,6 +3226,12 @@ define <8 x i16> @test_paddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %1, <8 x i16> %2)
@@ -2890,6 +3279,12 @@ define <16 x i8> @test_paddusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddusb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %1, <16 x i8> %2)
@@ -2937,6 +3332,12 @@ define <8 x i16> @test_paddusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddusw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %1, <8 x i16> %2)
@@ -2984,6 +3385,12 @@ define <8 x i16> @test_paddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_paddw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = add <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = add <8 x i16> %1, %2
@@ -3032,6 +3439,13 @@ define <2 x i64> @test_pand(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pand:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = and <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = and <2 x i64> %1, %2
@@ -3087,6 +3501,13 @@ define <2 x i64> @test_pandn(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pandn:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = xor <2 x i64> %a0, <i64 -1, i64 -1>
%2 = and <2 x i64> %a1, %1
%3 = load <2 x i64>, <2 x i64> *%a2, align 16
@@ -3136,6 +3557,12 @@ define <16 x i8> @test_pavgb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pavgb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %1, <16 x i8> %2)
@@ -3183,6 +3610,12 @@ define <8 x i16> @test_pavgw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pavgw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %1, <8 x i16> %2)
@@ -3234,6 +3667,13 @@ define <16 x i8> @test_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpeqb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp eq <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = icmp eq <16 x i8> %a0, %2
@@ -3286,6 +3726,13 @@ define <4 x i32> @test_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpeqd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp eq <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = icmp eq <4 x i32> %a0, %2
@@ -3338,6 +3785,13 @@ define <8 x i16> @test_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpeqw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp eq <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = icmp eq <8 x i16> %a0, %2
@@ -3391,6 +3845,13 @@ define <16 x i8> @test_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpgtb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp sgt <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = icmp sgt <16 x i8> %a0, %2
@@ -3444,6 +3905,13 @@ define <4 x i32> @test_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpgtd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp sgt <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = icmp eq <4 x i32> %a0, %2
@@ -3497,6 +3965,13 @@ define <8 x i16> @test_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpgtw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp sgt <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = icmp sgt <8 x i16> %a0, %2
@@ -3541,6 +4016,12 @@ define i16 @test_pextrw(<8 x i16> %a0) {
; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pextrw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = extractelement <8 x i16> %a0, i32 6
ret i16 %1
}
@@ -3585,6 +4066,12 @@ define <8 x i16> @test_pinsrw(<8 x i16> %a0, i16 %a1, i16 *%a2) {
; BTVER2-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pinsrw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <8 x i16> %a0, i16 %a1, i32 1
%2 = load i16, i16 *%a2
%3 = insertelement <8 x i16> %1, i16 %2, i32 3
@@ -3635,6 +4122,12 @@ define <4 x i32> @test_pmaddwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaddwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
%2 = bitcast <4 x i32> %1 to <8 x i16>
%3 = load <8 x i16>, <8 x i16> *%a2, align 16
@@ -3683,6 +4176,12 @@ define <8 x i16> @test_pmaxsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaxsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %1, <8 x i16> %2)
@@ -3730,6 +4229,12 @@ define <16 x i8> @test_pmaxub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaxub:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %1, <16 x i8> %2)
@@ -3777,6 +4282,12 @@ define <8 x i16> @test_pminsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pminsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %1, <8 x i16> %2)
@@ -3824,6 +4335,12 @@ define <16 x i8> @test_pminub(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pminub:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %1, <16 x i8> %2)
@@ -3863,6 +4380,11 @@ define i32 @test_pmovmskb(<16 x i8> %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovmskb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
ret i32 %1
}
@@ -3904,6 +4426,12 @@ define <8 x i16> @test_pmulhuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmulhuw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %1, <8 x i16> %2)
@@ -3947,6 +4475,12 @@ define <8 x i16> @test_pmulhw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmulhw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %1, <8 x i16> %2)
@@ -3990,6 +4524,12 @@ define <8 x i16> @test_pmullw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmullw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = mul <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = mul <8 x i16> %1, %2
@@ -4040,6 +4580,12 @@ define <2 x i64> @test_pmuludq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmuludq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -4090,6 +4636,13 @@ define <2 x i64> @test_por(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_por:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = or <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = or <2 x i64> %1, %2
@@ -4141,6 +4694,12 @@ define <2 x i64> @test_psadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psadbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
%2 = bitcast <2 x i64> %1 to <16 x i8>
%3 = load <16 x i8>, <16 x i8> *%a2, align 16
@@ -4193,6 +4752,13 @@ define <4 x i32> @test_pshufd(<4 x i32> %a0, <4 x i32> *%a1) {
; BTVER2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pshufd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
+; ZNVER1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -4244,6 +4810,13 @@ define <8 x i16> @test_pshufhw(<8 x i16> %a0, <8 x i16> *%a1) {
; BTVER2-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pshufhw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [8:0.50]
+; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.25]
+; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6>
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4>
@@ -4295,6 +4868,13 @@ define <8 x i16> @test_pshuflw(<8 x i16> %a0, <8 x i16> *%a1) {
; BTVER2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pshuflw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [8:0.50]
+; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.25]
+; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
@@ -4344,6 +4924,13 @@ define <4 x i32> @test_pslld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pslld:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %1, <4 x i32> %2)
@@ -4389,6 +4976,11 @@ define <4 x i32> @test_pslldq(<4 x i32> %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pslldq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
ret <4 x i32> %1
}
@@ -4435,6 +5027,13 @@ define <2 x i64> @test_psllq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psllq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %1, <2 x i64> %2)
@@ -4486,6 +5085,13 @@ define <8 x i16> @test_psllw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psllw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %1, <8 x i16> %2)
@@ -4537,6 +5143,13 @@ define <4 x i32> @test_psrad(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psrad:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> %2)
@@ -4588,6 +5201,13 @@ define <8 x i16> @test_psraw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psraw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> %2)
@@ -4639,6 +5259,13 @@ define <4 x i32> @test_psrld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psrld:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %1, <4 x i32> %2)
@@ -4684,6 +5311,11 @@ define <4 x i32> @test_psrldq(<4 x i32> %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psrldq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
ret <4 x i32> %1
}
@@ -4730,6 +5362,13 @@ define <2 x i64> @test_psrlq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psrlq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %1, <2 x i64> %2)
@@ -4781,6 +5420,13 @@ define <8 x i16> @test_psrlw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psrlw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %1, <8 x i16> %2)
@@ -4830,6 +5476,12 @@ define <16 x i8> @test_psubb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = sub <16 x i8> %1, %2
@@ -4876,6 +5528,12 @@ define <4 x i32> @test_psubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = sub <4 x i32> %1, %2
@@ -4918,6 +5576,12 @@ define <2 x i64> @test_psubq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = sub <2 x i64> %1, %2
@@ -4964,6 +5628,12 @@ define <16 x i8> @test_psubsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %1, <16 x i8> %2)
@@ -5011,6 +5681,12 @@ define <8 x i16> @test_psubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %1, <8 x i16> %2)
@@ -5058,6 +5734,12 @@ define <16 x i8> @test_psubusb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubusb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %1, <16 x i8> %2)
@@ -5105,6 +5787,12 @@ define <8 x i16> @test_psubusw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubusw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %1, <8 x i16> %2)
@@ -5152,6 +5840,12 @@ define <8 x i16> @test_psubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psubw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = sub <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = sub <8 x i16> %1, %2
@@ -5198,6 +5892,12 @@ define <16 x i8> @test_punpckhbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.50]
; BTVER2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpckhbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -5248,6 +5948,13 @@ define <4 x i32> @test_punpckhdq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpckhdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -5297,6 +6004,13 @@ define <2 x i64> @test_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
; BTVER2-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpckhqdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
@@ -5344,6 +6058,12 @@ define <8 x i16> @test_punpckhwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; BTVER2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpckhwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -5390,6 +6110,12 @@ define <16 x i8> @test_punpcklbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.50]
; BTVER2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpcklbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -5440,6 +6166,13 @@ define <4 x i32> @test_punpckldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpckldq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [8:0.50]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -5489,6 +6222,13 @@ define <2 x i64> @test_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
; BTVER2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpcklqdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
@@ -5536,6 +6276,12 @@ define <8 x i16> @test_punpcklwd(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; BTVER2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_punpcklwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -5584,6 +6330,13 @@ define <2 x i64> @test_pxor(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pxor:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = xor <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = xor <2 x i64> %1, %2
@@ -5633,6 +6386,13 @@ define <2 x double> @test_shufpd(<2 x double> %a0, <2 x double> %a1, <2 x double
; BTVER2-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_shufpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50]
+; ZNVER1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 2>
@@ -5683,6 +6443,13 @@ define <2 x double> @test_sqrtpd(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [21:21.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sqrtpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %2)
@@ -5741,6 +6508,14 @@ define <2 x double> @test_sqrtsd(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sqrtsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovapd (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %2)
@@ -5785,6 +6560,12 @@ define <2 x double> @test_subpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_subpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fsub <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fsub <2 x double> %1, %2
@@ -5827,6 +6608,12 @@ define double @test_subsd(double %a0, double %a1, double *%a2) {
; BTVER2-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_subsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = fsub double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fsub double %1, %2
@@ -5917,6 +6704,20 @@ define i32 @test_ucomisd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2)
; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50]
; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ucomisd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vucomisd %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: vucomisd (%rdi), %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %dl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 8
%3 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %2)
@@ -5967,6 +6768,13 @@ define <2 x double> @test_unpckhpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_unpckhpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
+; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 3>
@@ -6022,6 +6830,13 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_unpcklpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
+; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> <i32 0, i32 2>
@@ -6071,6 +6886,13 @@ define <2 x double> @test_xorpd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_xorpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, %2
diff --git a/test/CodeGen/X86/sse3-schedule.ll b/test/CodeGen/X86/sse3-schedule.ll
index ef1ddae4532d4..ad38d1c6ff490 100644
--- a/test/CodeGen/X86/sse3-schedule.ll
+++ b/test/CodeGen/X86/sse3-schedule.ll
@@ -7,7 +7,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
define <2 x double> @test_addsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_addsubpd:
@@ -45,6 +45,12 @@ define <2 x double> @test_addsubpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; BTVER2-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addsubpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %1, <2 x double> %2)
@@ -88,6 +94,12 @@ define <4 x float> @test_addsubps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; BTVER2-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addsubps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %1, <4 x float> %2)
@@ -131,6 +143,12 @@ define <2 x double> @test_haddpd(<2 x double> %a0, <2 x double> %a1, <2 x double
; BTVER2-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_haddpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %1, <2 x double> %2)
@@ -174,6 +192,12 @@ define <4 x float> @test_haddps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
; BTVER2-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_haddps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %2)
@@ -217,6 +241,12 @@ define <2 x double> @test_hsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double
; BTVER2-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_hsubpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %1, <2 x double> %2)
@@ -260,6 +290,12 @@ define <4 x float> @test_hsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
; BTVER2-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_hsubps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %1, <4 x float> %2)
@@ -299,6 +335,11 @@ define <16 x i8> @test_lddqu(i8* %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vlddqu (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_lddqu:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vlddqu (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0)
ret <16 x i8> %1
}
@@ -347,6 +388,13 @@ define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movddup:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [8:0.50]
+; ZNVER1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
@@ -397,6 +445,13 @@ define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movshdup:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [8:0.50]
+; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -447,6 +502,13 @@ define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movsldup:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [8:0.50]
+; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
diff --git a/test/CodeGen/X86/sse41-schedule.ll b/test/CodeGen/X86/sse41-schedule.ll
index 1ab1598fcab7c..26cca98816a3d 100644
--- a/test/CodeGen/X86/sse41-schedule.ll
+++ b/test/CodeGen/X86/sse41-schedule.ll
@@ -6,7 +6,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
; GENERIC-LABEL: test_blendpd:
@@ -43,6 +43,13 @@ define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blendpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fadd <2 x double> %a1, %1
@@ -80,6 +87,12 @@ define <4 x float> @test_blendps(<4 x float> %a0, <4 x float> %a1, <4 x float> *
; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blendps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
+; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
@@ -122,6 +135,12 @@ define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; BTVER2-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blendvpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
%2 = load <2 x double>, <2 x double> *%a3, align 16
%3 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %1, <2 x double> %2, <2 x double> %a2)
@@ -165,6 +184,12 @@ define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; BTVER2-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_blendvps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
%2 = load <4 x float>, <4 x float> *%a3
%3 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %1, <4 x float> %2, <4 x float> %a2)
@@ -202,6 +227,12 @@ define <2 x double> @test_dppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
; BTVER2-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_dppd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %1, <2 x double> %2, i8 7)
@@ -239,6 +270,12 @@ define <4 x float> @test_dpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
; BTVER2-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_dpps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %1, <4 x float> %2, i8 7)
@@ -276,6 +313,12 @@ define <4 x float> @test_insertps(<4 x float> %a0, <4 x float> %a1, float *%a2)
; BTVER2-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
; BTVER2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_insertps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
+; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17)
%2 = load float, float *%a2
%3 = insertelement <4 x float> %1, float %2, i32 3
@@ -308,6 +351,11 @@ define <2 x i64> @test_movntdqa(i8* %a0) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [5:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntdqa:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %a0)
ret <2 x i64> %1
}
@@ -343,6 +391,12 @@ define <8 x i16> @test_mpsadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BTVER2-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mpsadbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = bitcast <8 x i16> %1 to <16 x i8>
%3 = load <16 x i8>, <16 x i8> *%a2, align 16
@@ -381,6 +435,12 @@ define <8 x i16> @test_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_packusdw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -425,6 +485,12 @@ define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16
; BTVER2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pblendvb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2)
%2 = load <16 x i8>, <16 x i8> *%a3, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %1, <16 x i8> %2, <16 x i8> %a2)
@@ -462,6 +528,12 @@ define <8 x i16> @test_pblendw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
; BTVER2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pblendw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
+; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
@@ -498,6 +570,12 @@ define <2 x i64> @test_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpeqq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp eq <2 x i64> %a0, %a1
%2 = sext <2 x i1> %1 to <2 x i64>
%3 = load <2 x i64>, <2 x i64>*%a2, align 16
@@ -536,6 +614,12 @@ define i32 @test_pextrb(<16 x i8> %a0, i8 *%a1) {
; BTVER2-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pextrb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = extractelement <16 x i8> %a0, i32 3
%2 = extractelement <16 x i8> %a0, i32 1
store i8 %2, i8 *%a1
@@ -573,6 +657,12 @@ define i32 @test_pextrd(<4 x i32> %a0, i32 *%a1) {
; BTVER2-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pextrd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = extractelement <4 x i32> %a0, i32 3
%2 = extractelement <4 x i32> %a0, i32 1
store i32 %2, i32 *%a1
@@ -609,6 +699,12 @@ define i64 @test_pextrq(<2 x i64> %a0, <2 x i64> %a1, i64 *%a2) {
; BTVER2-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.50]
; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pextrq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = extractelement <2 x i64> %a0, i32 1
%2 = extractelement <2 x i64> %a0, i32 1
store i64 %2, i64 *%a2
@@ -645,6 +741,12 @@ define i32 @test_pextrw(<8 x i16> %a0, i16 *%a1) {
; BTVER2-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pextrw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = extractelement <8 x i16> %a0, i32 3
%2 = extractelement <8 x i16> %a0, i32 1
store i16 %2, i16 *%a1
@@ -682,6 +784,12 @@ define <8 x i16> @test_phminposuw(<8 x i16> *%a0) {
; BTVER2-NEXT: vphminposuw (%rdi), %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: vphminposuw %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phminposuw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = load <8 x i16>, <8 x i16> *%a0, align 16
%2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %1)
%3 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %2)
@@ -719,6 +827,12 @@ define <16 x i8> @test_pinsrb(<16 x i8> %a0, i8 %a1, i8 *%a2) {
; BTVER2-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pinsrb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <16 x i8> %a0, i8 %a1, i32 1
%2 = load i8, i8 *%a2
%3 = insertelement <16 x i8> %1, i8 %2, i32 3
@@ -755,6 +869,12 @@ define <4 x i32> @test_pinsrd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; BTVER2-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pinsrd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <4 x i32> %a0, i32 %a1, i32 1
%2 = load i32, i32 *%a2
%3 = insertelement <4 x i32> %1, i32 %2, i32 3
@@ -796,6 +916,13 @@ define <2 x i64> @test_pinsrq(<2 x i64> %a0, <2 x i64> %a1, i64 %a2, i64 *%a3) {
; BTVER2-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pinsrq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = insertelement <2 x i64> %a0, i64 %a2, i32 1
%2 = load i64, i64 *%a3
%3 = insertelement <2 x i64> %a1, i64 %2, i32 1
@@ -833,6 +960,12 @@ define <16 x i8> @test_pmaxsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaxsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %1, <16 x i8> %2)
@@ -870,6 +1003,12 @@ define <4 x i32> @test_pmaxsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaxsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %1, <4 x i32> %2)
@@ -907,6 +1046,12 @@ define <4 x i32> @test_pmaxud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaxud:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %1, <4 x i32> %2)
@@ -944,6 +1089,12 @@ define <8 x i16> @test_pmaxuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaxuw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %1, <8 x i16> %2)
@@ -981,6 +1132,12 @@ define <16 x i8> @test_pminsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pminsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %1, <16 x i8> %2)
@@ -1018,6 +1175,12 @@ define <4 x i32> @test_pminsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pminsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %1, <4 x i32> %2)
@@ -1055,6 +1218,12 @@ define <4 x i32> @test_pminud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pminud:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %1, <4 x i32> %2)
@@ -1092,6 +1261,12 @@ define <8 x i16> @test_pminuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pminuw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %1, <8 x i16> %2)
@@ -1135,6 +1310,13 @@ define <8 x i16> @test_pmovsxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; BTVER2-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovsxbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = sext <8 x i8> %1 to <8 x i16>
%3 = load <8 x i8>, <8 x i8>* %a1, align 1
@@ -1179,6 +1361,13 @@ define <4 x i32> @test_pmovsxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; BTVER2-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovsxbd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = sext <4 x i8> %1 to <4 x i32>
%3 = load <4 x i8>, <4 x i8>* %a1, align 1
@@ -1223,6 +1412,13 @@ define <2 x i64> @test_pmovsxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; BTVER2-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovsxbq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i8> %1 to <2 x i64>
%3 = load <2 x i8>, <2 x i8>* %a1, align 1
@@ -1267,6 +1463,13 @@ define <2 x i64> @test_pmovsxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; BTVER2-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovsxdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i32> %1 to <2 x i64>
%3 = load <2 x i32>, <2 x i32>* %a1, align 1
@@ -1311,6 +1514,13 @@ define <4 x i32> @test_pmovsxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; BTVER2-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovsxwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = sext <4 x i16> %1 to <4 x i32>
%3 = load <4 x i16>, <4 x i16>* %a1, align 1
@@ -1355,6 +1565,13 @@ define <2 x i64> @test_pmovsxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; BTVER2-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovsxwq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i16> %1 to <2 x i64>
%3 = load <2 x i16>, <2 x i16>* %a1, align 1
@@ -1399,6 +1616,13 @@ define <8 x i16> @test_pmovzxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; BTVER2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovzxbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
+; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = zext <8 x i8> %1 to <8 x i16>
%3 = load <8 x i8>, <8 x i8>* %a1, align 1
@@ -1443,6 +1667,13 @@ define <4 x i32> @test_pmovzxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; BTVER2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovzxbd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = zext <4 x i8> %1 to <4 x i32>
%3 = load <4 x i8>, <4 x i8>* %a1, align 1
@@ -1487,6 +1718,13 @@ define <2 x i64> @test_pmovzxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; BTVER2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovzxbq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i8> %1 to <2 x i64>
%3 = load <2 x i8>, <2 x i8>* %a1, align 1
@@ -1531,6 +1769,13 @@ define <2 x i64> @test_pmovzxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; BTVER2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovzxdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [8:0.50]
+; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i32> %1 to <2 x i64>
%3 = load <2 x i32>, <2 x i32>* %a1, align 1
@@ -1575,6 +1820,13 @@ define <4 x i32> @test_pmovzxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; BTVER2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
; BTVER2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovzxwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
+; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = zext <4 x i16> %1 to <4 x i32>
%3 = load <4 x i16>, <4 x i16>* %a1, align 1
@@ -1619,6 +1871,13 @@ define <2 x i64> @test_pmovzxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; BTVER2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.50]
; BTVER2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmovzxwq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.25]
+; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i16> %1 to <2 x i64>
%3 = load <2 x i16>, <2 x i16>* %a1, align 1
@@ -1657,6 +1916,12 @@ define <2 x i64> @test_pmuldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmuldq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -1695,6 +1960,12 @@ define <4 x i32> @test_pmulld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmulld:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = mul <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = mul <4 x i32> %1, %2
@@ -1751,6 +2022,16 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: andb %al, %cl # sched: [1:0.50]
; BTVER2-NEXT: movzbl %cl, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ptest:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vptest %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: setb %al # sched: [1:0.25]
+; ZNVER1-NEXT: vptest (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: setb %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %cl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %2)
@@ -1795,6 +2076,13 @@ define <2 x double> @test_roundpd(<2 x double> %a0, <2 x double> *%a1) {
; BTVER2-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_roundpd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [10:1.00]
+; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %2, i32 7)
@@ -1839,6 +2127,13 @@ define <4 x float> @test_roundps(<4 x float> %a0, <4 x float> *%a1) {
; BTVER2-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_roundps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [10:1.00]
+; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %2, i32 7)
@@ -1884,6 +2179,13 @@ define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; BTVER2-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_roundsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7)
%2 = load <2 x double>, <2 x double>* %a2, align 16
%3 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %2, i32 7)
@@ -1929,6 +2231,13 @@ define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> *
; BTVER2-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_roundss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %2, i32 7)
diff --git a/test/CodeGen/X86/sse42-schedule.ll b/test/CodeGen/X86/sse42-schedule.ll
index 7ce9ffdbd0ea1..adf857e121797 100644
--- a/test/CodeGen/X86/sse42-schedule.ll
+++ b/test/CodeGen/X86/sse42-schedule.ll
@@ -6,7 +6,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1

define i32 @crc32_32_8(i32 %a0, i8 %a1, i8 *%a2) {
; GENERIC-LABEL: crc32_32_8:
@@ -43,6 +43,13 @@ define i32 @crc32_32_8(i32 %a0, i8 %a1, i8 *%a2) {
; BTVER2-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: crc32_32_8:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
+; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
+; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a0, i8 %a1)
%2 = load i8, i8 *%a2
%3 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %1, i8 %2)
@@ -85,6 +92,13 @@ define i32 @crc32_32_16(i32 %a0, i16 %a1, i16 *%a2) {
; BTVER2-NEXT: crc32w (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: crc32_32_16:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: crc32w %si, %edi # sched: [3:1.00]
+; ZNVER1-NEXT: crc32w (%rdx), %edi # sched: [10:1.00]
+; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a0, i16 %a1)
%2 = load i16, i16 *%a2
%3 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %1, i16 %2)
@@ -127,6 +141,13 @@ define i32 @crc32_32_32(i32 %a0, i32 %a1, i32 *%a2) {
; BTVER2-NEXT: crc32l (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movl %edi, %eax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: crc32_32_32:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: crc32l %esi, %edi # sched: [3:1.00]
+; ZNVER1-NEXT: crc32l (%rdx), %edi # sched: [10:1.00]
+; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a0, i32 %a1)
%2 = load i32, i32 *%a2
%3 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %1, i32 %2)
@@ -169,6 +190,13 @@ define i64 @crc32_64_8(i64 %a0, i8 %a1, i8 *%a2) nounwind {
; BTVER2-NEXT: crc32b (%rdx), %edi # sched: [8:1.00]
; BTVER2-NEXT: movq %rdi, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: crc32_64_8:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
+; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
+; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1)
%2 = load i8, i8 *%a2
%3 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %1, i8 %2)
@@ -211,6 +239,13 @@ define i64 @crc32_64_64(i64 %a0, i64 %a1, i64 *%a2) {
; BTVER2-NEXT: crc32q (%rdx), %rdi # sched: [8:1.00]
; BTVER2-NEXT: movq %rdi, %rax # sched: [1:0.17]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: crc32_64_64:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
+; ZNVER1-NEXT: crc32q (%rdx), %rdi # sched: [10:1.00]
+; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
%2 = load i64, i64 *%a2
%3 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %1, i64 %2)
@@ -283,6 +318,19 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; BTVER2-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpestri:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
+; ZNVER1-NEXT: movl %ecx, %esi # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %2, i32 7, i8 7)
@@ -341,6 +389,16 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: movl $7, %edx # sched: [1:0.17]
; BTVER2-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [18:2.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpestrm:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7)
@@ -393,6 +451,15 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; BTVER2-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpistri:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %2, i8 7)
@@ -431,6 +498,12 @@ define <16 x i8> @test_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [12:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpistrm:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %1, <16 x i8> %2, i8 7)
@@ -468,6 +541,12 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; BTVER2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pcmpgtq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = icmp sgt <2 x i64> %a0, %a1
%2 = sext <2 x i1> %1 to <2 x i64>
%3 = load <2 x i64>, <2 x i64>*%a2, align 16
diff --git a/test/CodeGen/X86/sse4a-schedule.ll b/test/CodeGen/X86/sse4a-schedule.ll
index 11afdb7989f15..9ad6b0dfd4d61 100644
--- a/test/CodeGen/X86/sse4a-schedule.ll
+++ b/test/CodeGen/X86/sse4a-schedule.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+sse4a | FileCheck %s --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=ZNVER1

define <2 x i64> @test_extrq(<2 x i64> %a0, <16 x i8> %a1) {
; GENERIC-LABEL: test_extrq:
@@ -11,8 +11,13 @@ define <2 x i64> @test_extrq(<2 x i64> %a0, <16 x i8> %a1) {
;
; BTVER2-LABEL: test_extrq:
; BTVER2: # BB#0:
-; BTVER2-NEXT: extrq %xmm1, %xmm0
+; BTVER2-NEXT: extrq %xmm1, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_extrq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: extrq %xmm1, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
ret <2 x i64> %1
}
@@ -26,8 +31,13 @@ define <2 x i64> @test_extrqi(<2 x i64> %a0) {
;
; BTVER2-LABEL: test_extrqi:
; BTVER2: # BB#0:
-; BTVER2-NEXT: extrq $2, $3, %xmm0
+; BTVER2-NEXT: extrq $2, $3, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_extrqi:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: extrq $2, $3, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
ret <2 x i64> %1
}
@@ -41,8 +51,13 @@ define <2 x i64> @test_insertq(<2 x i64> %a0, <2 x i64> %a1) {
;
; BTVER2-LABEL: test_insertq:
; BTVER2: # BB#0:
-; BTVER2-NEXT: insertq %xmm1, %xmm0
+; BTVER2-NEXT: insertq %xmm1, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_insertq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: insertq %xmm1, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
ret <2 x i64> %1
}
@@ -56,8 +71,13 @@ define <2 x i64> @test_insertqi(<2 x i64> %a0, <2 x i64> %a1) {
;
; BTVER2-LABEL: test_insertqi:
; BTVER2: # BB#0:
-; BTVER2-NEXT: insertq $6, $5, %xmm1, %xmm0
+; BTVER2-NEXT: insertq $6, $5, %xmm1, %xmm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_insertqi:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
ret <2 x i64> %1
}
@@ -73,6 +93,11 @@ define void @test_movntsd(i8* %p, <2 x double> %a) {
; BTVER2: # BB#0:
; BTVER2-NEXT: movntsd %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a)
ret void
}
@@ -88,6 +113,11 @@ define void @test_movntss(i8* %p, <4 x float> %a) {
; BTVER2: # BB#0:
; BTVER2-NEXT: movntss %xmm0, (%rdi) # sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a)
ret void
}
diff --git a/test/CodeGen/X86/ssse3-schedule.ll b/test/CodeGen/X86/ssse3-schedule.ll
index f24969a30c337..24ace69ebb9e2 100644
--- a/test/CodeGen/X86/ssse3-schedule.ll
+++ b/test/CodeGen/X86/ssse3-schedule.ll
@@ -7,7 +7,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1

define <16 x i8> @test_pabsb(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pabsb:
@@ -52,6 +52,13 @@ define <16 x i8> @test_pabsb(<16 x i8> %a0, <16 x i8> *%a1) {
; BTVER2-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pabsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpabsb (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
%2 = load <16 x i8>, <16 x i8> *%a1, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %2)
@@ -103,6 +110,13 @@ define <4 x i32> @test_pabsd(<4 x i32> %a0, <4 x i32> *%a1) {
; BTVER2-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pabsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpabsd (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %2)
@@ -147,6 +161,11 @@ define <8 x i16> @test_pabsw(<8 x i16> %a0, <8 x i16> *%a1) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pabsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %2)
@@ -196,6 +215,12 @@ define <8 x i16> @test_palignr(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.50]
; BTVER2-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_palignr:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.25]
+; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> %1, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -238,6 +263,12 @@ define <4 x i32> @test_phaddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phaddd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %1, <4 x i32> %2)
@@ -289,6 +320,12 @@ define <8 x i16> @test_phaddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phaddsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %1, <8 x i16> %2)
@@ -332,6 +369,12 @@ define <8 x i16> @test_phaddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phaddw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %1, <8 x i16> %2)
@@ -375,6 +418,12 @@ define <4 x i32> @test_phsubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phsubd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %1, <4 x i32> %2)
@@ -426,6 +475,12 @@ define <8 x i16> @test_phsubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phsubsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %1, <8 x i16> %2)
@@ -469,6 +524,12 @@ define <8 x i16> @test_phsubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_phsubw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %1, <8 x i16> %2)
@@ -512,6 +573,12 @@ define <8 x i16> @test_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmaddubsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = bitcast <8 x i16> %1 to <16 x i8>
@@ -550,6 +617,11 @@ define <8 x i16> @test_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2: # BB#0:
; BTVER2-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pmulhrsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %1, <8 x i16> %2)
@@ -593,6 +665,12 @@ define <16 x i8> @test_pshufb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_pshufb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> %2)
@@ -644,6 +722,12 @@ define <16 x i8> @test_psignb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psignb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %1, <16 x i8> %2)
@@ -695,6 +779,12 @@ define <4 x i32> @test_psignd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; BTVER2-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psignd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %1, <4 x i32> %2)
@@ -746,6 +836,12 @@ define <8 x i16> @test_psignw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; BTVER2-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BTVER2-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_psignw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %1, <8 x i16> %2)
diff --git a/test/CodeGen/X86/statepoint-invoke.ll b/test/CodeGen/X86/statepoint-invoke.ll
index 29f8e3ed4f789..784b932addc85 100644
--- a/test/CodeGen/X86/statepoint-invoke.ll
+++ b/test/CodeGen/X86/statepoint-invoke.ll
@@ -95,8 +95,8 @@ left.relocs:

right:
; CHECK-LABEL: %right
- ; CHECK: movq
; CHECK: movq %rdx, (%rsp)
+ ; CHECK: movq
; CHECK: callq some_call
%sp2 = invoke token (i64, i32, void (i64 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i64f(i64 0, i32 0, void (i64 addrspace(1)*)* @some_call, i32 1, i32 0, i64 addrspace(1)* %val1, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i64 addrspace(1)* %val2, i64 addrspace(1)* %val3)
to label %right.relocs unwind label %exceptional_return.right
diff --git a/test/CodeGen/X86/statepoint-stack-usage.ll b/test/CodeGen/X86/statepoint-stack-usage.ll
index b16426eae3d5c..6e7fc7bf1c079 100644
--- a/test/CodeGen/X86/statepoint-stack-usage.ll
+++ b/test/CodeGen/X86/statepoint-stack-usage.ll
@@ -11,9 +11,9 @@ target triple = "x86_64-pc-linux-gnu"
define i32 @back_to_back_calls(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 gc "statepoint-example" {
; CHECK-LABEL: back_to_back_calls
; The exact stores don't matter, but there need to be three stack slots created
-; CHECK: movq %rdi, 16(%rsp)
-; CHECK: movq %rdx, 8(%rsp)
-; CHECK: movq %rsi, (%rsp)
+; CHECK-DAG: movq %rdi, 16(%rsp)
+; CHECK-DAG: movq %rdx, 8(%rsp)
+; CHECK-DAG: movq %rsi, (%rsp)
; There should be no more than three moves
; CHECK-NOT: movq
%safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c)
@@ -36,9 +36,9 @@ define i32 @back_to_back_calls(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 a
define i32 @reserve_first(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 gc "statepoint-example" {
; CHECK-LABEL: reserve_first
; The exact stores don't matter, but there need to be three stack slots created
-; CHECK: movq %rdi, 16(%rsp)
-; CHECK: movq %rdx, 8(%rsp)
-; CHECK: movq %rsi, (%rsp)
+; CHECK-DAG: movq %rdi, 16(%rsp)
+; CHECK-DAG: movq %rdx, 8(%rsp)
+; CHECK-DAG: movq %rsi, (%rsp)
%safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c)
%a1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 12, i32 12)
%b1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 12, i32 13)
@@ -61,21 +61,21 @@ define i32 @back_to_back_deopt(i32 %a, i32 %b, i32 %c) #1
gc "statepoint-example" {
; CHECK-LABEL: back_to_back_deopt
; The exact stores don't matter, but there need to be three stack slots created
-; CHECK: movl %ebx, 12(%rsp)
-; CHECK: movl %ebp, 8(%rsp)
-; CHECK: movl %r14d, 4(%rsp)
+; CHECK-DAG: movl %ebx, 12(%rsp)
+; CHECK-DAG: movl %ebp, 8(%rsp)
+; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK: callq
-; CHECK: movl %ebx, 12(%rsp)
-; CHECK: movl %ebp, 8(%rsp)
-; CHECK: movl %r14d, 4(%rsp)
+; CHECK-DAG: movl %ebx, 12(%rsp)
+; CHECK-DAG: movl %ebp, 8(%rsp)
+; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK: callq
-; CHECK: movl %ebx, 12(%rsp)
-; CHECK: movl %ebp, 8(%rsp)
-; CHECK: movl %r14d, 4(%rsp)
+; CHECK-DAG: movl %ebx, 12(%rsp)
+; CHECK-DAG: movl %ebp, 8(%rsp)
+; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK: callq
-; CHECK: movl %ebx, 12(%rsp)
-; CHECK: movl %ebp, 8(%rsp)
-; CHECK: movl %r14d, 4(%rsp)
+; CHECK-DAG: movl %ebx, 12(%rsp)
+; CHECK-DAG: movl %ebp, 8(%rsp)
+; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK: callq
call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 3, i32 %a, i32 %b, i32 %c)
call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 3, i32 %a, i32 %b, i32 %c)
@@ -89,9 +89,9 @@ define i32 @back_to_back_invokes(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32
; CHECK-LABEL: back_to_back_invokes
entry:
; The exact stores don't matter, but there need to be three stack slots created
- ; CHECK: movq %rdi, 16(%rsp)
- ; CHECK: movq %rdx, 8(%rsp)
- ; CHECK: movq %rsi, (%rsp)
+ ; CHECK-DAG: movq %rdi, 16(%rsp)
+ ; CHECK-DAG: movq %rdx, 8(%rsp)
+ ; CHECK-DAG: movq %rsi, (%rsp)
; CHECK: callq
%safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c)
to label %normal_return unwind label %exceptional_return
diff --git a/test/CodeGen/X86/statepoint-vector.ll b/test/CodeGen/X86/statepoint-vector.ll
index 5bc8f983ff06b..538d175649576 100644
--- a/test/CodeGen/X86/statepoint-vector.ll
+++ b/test/CodeGen/X86/statepoint-vector.ll
@@ -49,8 +49,8 @@ entry:
; CHECK: subq $40, %rsp
; CHECK: testb $1, %dil
; CHECK: movaps (%rsi), %xmm0
-; CHECK: movaps %xmm0, 16(%rsp)
-; CHECK: movaps %xmm0, (%rsp)
+; CHECK-DAG: movaps %xmm0, (%rsp)
+; CHECK-DAG: movaps %xmm0, 16(%rsp)
; CHECK: callq do_safepoint
; CHECK: movaps (%rsp), %xmm0
; CHECK: addq $40, %rsp
diff --git a/test/CodeGen/X86/vec_cmp_uint-128.ll b/test/CodeGen/X86/vec_cmp_uint-128.ll
index 8bed14e7e5f5f..cad7991c4f3b5 100644
--- a/test/CodeGen/X86/vec_cmp_uint-128.ll
+++ b/test/CodeGen/X86/vec_cmp_uint-128.ll
@@ -463,7 +463,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; AVX2-LABEL: gt_v4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -476,7 +476,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; AVX512-LABEL: gt_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -782,7 +782,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; AVX2-LABEL: lt_v4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
@@ -795,7 +795,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; AVX512-LABEL: lt_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/test/CodeGen/X86/vector-idiv-sdiv-128.ll
index 2b5eb695f53ea..87cf2026d1ef4 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-128.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-128.ll
@@ -135,7 +135,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
;
; AVX2-LABEL: test_div7_4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
@@ -433,7 +433,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
;
; AVX2-LABEL: test_rem7_4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
@@ -444,7 +444,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
; AVX2-NEXT: vpsrad $2, %xmm1, %xmm1
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [7,7,7,7]
; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index e7bfe3778212c..ce0ec6c3875ad 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -115,7 +115,7 @@ define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
;
; AVX2-LABEL: test_div7_8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
@@ -381,7 +381,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
;
; AVX2-LABEL: test_rem7_8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
@@ -392,7 +392,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpsrld $31, %ymm1, %ymm2
; AVX2-NEXT: vpsrad $2, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7]
; AVX2-NEXT: vpmulld %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vector-idiv-udiv-128.ll b/test/CodeGen/X86/vector-idiv-udiv-128.ll
index cd17fcf8c85b4..8138442b3eafd 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -130,7 +130,7 @@ define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
;
; AVX2-LABEL: test_div7_4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
@@ -412,7 +412,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
;
; AVX2-LABEL: test_rem7_4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
@@ -423,7 +423,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsrld $1, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpsrld $2, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [7,7,7,7]
; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vector-idiv-udiv-256.ll b/test/CodeGen/X86/vector-idiv-udiv-256.ll
index 4adc2e2fb6c90..b0433110f1818 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -123,7 +123,7 @@ define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
;
; AVX2-LABEL: test_div7_8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
@@ -392,7 +392,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
;
; AVX2-LABEL: test_rem7_8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
@@ -403,7 +403,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpsrld $2, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7]
; AVX2-NEXT: vpmulld %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index 6719a66f030f8..c65c3e7fd004f 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -73,7 +73,7 @@ define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
;
; AVX2-LABEL: PR20355:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
diff --git a/test/CodeGen/X86/vector-rotate-128.ll b/test/CodeGen/X86/vector-rotate-128.ll
index 852c1f4d3d981..04378ee2ee012 100644
--- a/test/CodeGen/X86/vector-rotate-128.ll
+++ b/test/CodeGen/X86/vector-rotate-128.ll
@@ -77,14 +77,19 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: var_rotate_v2i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
-; AVX512-NEXT: vpsubq %xmm1, %xmm2, %xmm2
-; AVX512-NEXT: vpsllvq %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpsrlvq %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: var_rotate_v2i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
; XOP-LABEL: var_rotate_v2i64:
; XOP: # BB#0:
@@ -207,21 +212,26 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; AVX2-LABEL: var_rotate_v4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrlvd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: var_rotate_v4i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
-; AVX512-NEXT: vpsubd %xmm1, %xmm2, %xmm2
-; AVX512-NEXT: vpsllvd %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpsrlvd %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: var_rotate_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
; XOP-LABEL: var_rotate_v4i32:
; XOP: # BB#0:
@@ -844,28 +854,24 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: constant_rotate_v2i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: constant_rotate_v2i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
+; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
;
-; XOPAVX1-LABEL: constant_rotate_v2i64:
-; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
-; XOPAVX1-NEXT: vpshlq %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: constant_rotate_v2i64:
-; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX2-NEXT: retq
+; AVX512VL-LABEL: constant_rotate_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: retq
+;
+; XOP-LABEL: constant_rotate_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v2i64:
; X32-SSE: # BB#0:
@@ -951,26 +957,24 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: constant_rotate_v4i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm1
-; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: constant_rotate_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
+; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
;
-; XOPAVX1-LABEL: constant_rotate_v4i32:
-; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT: retq
+; AVX512VL-LABEL: constant_rotate_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
-; XOPAVX2-LABEL: constant_rotate_v4i32:
-; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX2-NEXT: retq
+; XOP-LABEL: constant_rotate_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v4i32:
; X32-SSE: # BB#0:
@@ -1100,11 +1104,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
;
; XOP-LABEL: constant_rotate_v8i16:
; XOP: # BB#0:
-; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT: vpshlw %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v8i16:
@@ -1281,11 +1281,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
;
; XOP-LABEL: constant_rotate_v16i8:
; XOP: # BB#0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT: vpshlb %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vprotb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v16i8:
@@ -1371,12 +1367,18 @@ define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_v2i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllq $14, %xmm0, %xmm1
-; AVX512-NEXT: vpsrlq $50, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_v2i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolq $14, %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v2i64:
; XOP: # BB#0:
@@ -1412,12 +1414,18 @@ define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_v4i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpslld $4, %xmm0, %xmm1
-; AVX512-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprold $4, %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v4i32:
; XOP: # BB#0:
@@ -1544,11 +1552,19 @@ define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_mask_v2i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsrlq $49, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_mask_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolq $15, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v2i64:
; XOP: # BB#0:
@@ -1595,14 +1611,19 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_mask_v4i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpslld $4, %xmm0, %xmm1
-; AVX512-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_mask_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprold $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v4i32:
; XOP: # BB#0:
diff --git a/test/CodeGen/X86/vector-rotate-256.ll b/test/CodeGen/X86/vector-rotate-256.ll
index 14215e486bf9e..3b65b68352b5b 100644
--- a/test/CodeGen/X86/vector-rotate-256.ll
+++ b/test/CodeGen/X86/vector-rotate-256.ll
@@ -41,21 +41,25 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX2-LABEL: var_rotate_v4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [64,64,64,64]
; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpsrlvq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: var_rotate_v4i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX512-NEXT: vpsubq %ymm1, %ymm2, %ymm2
-; AVX512-NEXT: vpsllvq %ymm1, %ymm0, %ymm1
-; AVX512-NEXT: vpsrlvq %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: var_rotate_v4i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v4i64:
; XOPAVX1: # BB#0:
@@ -128,21 +132,25 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
;
; AVX2-LABEL: var_rotate_v8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: var_rotate_v8i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX512-NEXT: vpsubd %ymm1, %ymm2, %ymm2
-; AVX512-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
-; AVX512-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: var_rotate_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v8i32:
; XOPAVX1: # BB#0:
@@ -466,7 +474,7 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX1-NEXT: vpsllq $4, %xmm0, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT: vpsrlq $2, %xmm1, %xmm3
+; AVX1-NEXT: vpsrlq $4, %xmm1, %xmm3
; AVX1-NEXT: vpsrlq $14, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm3
@@ -483,36 +491,36 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: constant_rotate_v4i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm1
-; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: constant_rotate_v4i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
+; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: constant_rotate_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v4i64:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm4
-; XOPAVX1-NEXT: vpshlq %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX1-NEXT: vpshlq %xmm3, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v4i64:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm1
-; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <4 x i64> %a, <i64 4, i64 14, i64 50, i64 60>
- %lshr = lshr <4 x i64> %a, <i64 60, i64 50, i64 14, i64 2>
+ %lshr = lshr <4 x i64> %a, <i64 60, i64 50, i64 14, i64 4>
%or = or <4 x i64> %shl, %lshr
ret <4 x i64> %or
}
@@ -549,30 +557,33 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: constant_rotate_v8i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm1
-; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: constant_rotate_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
+; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: constant_rotate_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v8i32:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm2, %xmm2
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v8i32:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm1
-; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%lshr = lshr <8 x i32> %a, <i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21>
@@ -643,30 +654,18 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
;
; XOPAVX1-LABEL: constant_rotate_v16i16:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm3, %xmm4
-; XOPAVX1-NEXT: vpshlw %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX1-NEXT: vpshlw %xmm3, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v16i16:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm1
-; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; XOPAVX2-NEXT: vpshlw %xmm3, %xmm4, %xmm3
-; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm2
-; XOPAVX2-NEXT: vpshlw %xmm2, %xmm0, %xmm0
-; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%lshr = lshr <16 x i16> %a, <i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1>
@@ -768,32 +767,20 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
;
; XOPAVX1-LABEL: constant_rotate_v32i8:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm3
-; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX1-NEXT: vpshlb %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; XOPAVX1-NEXT: vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v32i8:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; XOPAVX2-NEXT: vpshlb %xmm1, %xmm2, %xmm3
-; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm1
-; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpshlb %xmm3, %xmm0, %xmm0
-; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; XOPAVX2-NEXT: vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT: vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
%lshr = lshr <32 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
@@ -825,12 +812,17 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_v4i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllq $14, %ymm0, %ymm1
-; AVX512-NEXT: vpsrlq $50, %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_v4i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolq $14, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v4i64:
; XOPAVX1: # BB#0:
@@ -873,12 +865,17 @@ define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_v8i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpslld $4, %ymm0, %ymm1
-; AVX512-NEXT: vpsrld $28, %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprold $4, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v8i32:
; XOPAVX1: # BB#0:
@@ -1027,11 +1024,18 @@ define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_mask_v4i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsrlq $49, %ymm0, %ymm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_mask_v4i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_mask_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprolq $15, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v4i64:
; XOPAVX1: # BB#0:
@@ -1082,14 +1086,18 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: splatconstant_rotate_mask_v8i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpslld $4, %ymm0, %ymm1
-; AVX512-NEXT: vpsrld $28, %ymm0, %ymm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_mask_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vprold $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v8i32:
; XOPAVX1: # BB#0:
diff --git a/test/CodeGen/X86/vector-rotate-512.ll b/test/CodeGen/X86/vector-rotate-512.ll
new file mode 100644
index 0000000000000..fa1b5c1c0cb4a
--- /dev/null
+++ b/test/CodeGen/X86/vector-rotate-512.ll
@@ -0,0 +1,831 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VLBW
+
+;
+; Variable Rotates
+;
+
+define <8 x i64> @var_rotate_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
+; AVX512-LABEL: var_rotate_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %b64 = sub <8 x i64> <i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64>, %b
+ %shl = shl <8 x i64> %a, %b
+ %lshr = lshr <8 x i64> %a, %b64
+ %or = or <8 x i64> %shl, %lshr
+ ret <8 x i64> %or
+}
+
+define <16 x i32> @var_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
+; AVX512-LABEL: var_rotate_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprolvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %b32 = sub <16 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %b
+ %shl = shl <16 x i32> %a, %b
+ %lshr = lshr <16 x i32> %a, %b32
+ %or = or <16 x i32> %shl, %lshr
+ ret <16 x i32> %or
+}
+
+define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
+; AVX512F-LABEL: var_rotate_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512F-NEXT: vpsubw %ymm2, %ymm4, %ymm5
+; AVX512F-NEXT: vpsubw %ymm3, %ymm4, %ymm4
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpsllvd %zmm3, %zmm1, %zmm3
+; AVX512F-NEXT: vpmovdw %zmm3, %ymm3
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpsllvd %zmm2, %zmm0, %zmm2
+; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512F-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
+; AVX512F-NEXT: vpsrlvd %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: vpsubw %ymm2, %ymm4, %ymm5
+; AVX512VL-NEXT: vpsubw %ymm3, %ymm4, %ymm4
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpsllvd %zmm3, %zmm1, %zmm3
+; AVX512VL-NEXT: vpmovdw %zmm3, %ymm3
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpsllvd %zmm2, %zmm0, %zmm2
+; AVX512VL-NEXT: vpmovdw %zmm2, %ymm2
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512VL-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
+; AVX512VL-NEXT: vpsrlvd %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: var_rotate_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu16 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: var_rotate_v32i16:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vmovdqu16 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VLBW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT: retq
+ %b16 = sub <32 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
+ %shl = shl <32 x i16> %a, %b
+ %lshr = lshr <32 x i16> %a, %b16
+ %or = or <32 x i16> %shl, %lshr
+ ret <32 x i16> %or
+}
+
+define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
+; AVX512F-LABEL: var_rotate_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm4
+; AVX512F-NEXT: vpsubb %ymm3, %ymm5, %ymm5
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm6
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm6
+; AVX512F-NEXT: vpsllw $2, %ymm6, %ymm8
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpand %ymm9, %ymm8, %ymm8
+; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm6
+; AVX512F-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm3
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
+; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm6
+; AVX512F-NEXT: vpsllw $2, %ymm6, %ymm7
+; AVX512F-NEXT: vpand %ymm9, %ymm7, %ymm7
+; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm2
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm6
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpsllw $5, %ymm5, %ymm5
+; AVX512F-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm6
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT: vpand %ymm8, %ymm6, %ymm6
+; AVX512F-NEXT: vpaddb %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm6
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT: vpand %ymm9, %ymm6, %ymm6
+; AVX512F-NEXT: vpaddb %ymm5, %ymm5, %ymm5
+; AVX512F-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm3
+; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm3
+; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm3
+; AVX512F-NEXT: vpand %ymm9, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT: vpsubb %ymm2, %ymm5, %ymm4
+; AVX512VL-NEXT: vpsubb %ymm3, %ymm5, %ymm5
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm6
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm6
+; AVX512VL-NEXT: vpsllw $2, %ymm6, %ymm8
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpand %ymm9, %ymm8, %ymm8
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm6
+; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm3
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
+; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm6
+; AVX512VL-NEXT: vpsllw $2, %ymm6, %ymm7
+; AVX512VL-NEXT: vpand %ymm9, %ymm7, %ymm7
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm6
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
+; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm6
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT: vpand %ymm8, %ymm6, %ymm6
+; AVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm6
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-NEXT: vpand %ymm9, %ymm6, %ymm6
+; AVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
+; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm5
+; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
+; AVX512VL-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm5
+; AVX512VL-NEXT: vpand %ymm8, %ymm5, %ymm5
+; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm5
+; AVX512VL-NEXT: vpand %ymm9, %ymm5, %ymm5
+; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: var_rotate_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm3
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
+; AVX512BW-NEXT: vpsllw $2, %zmm3, %zmm4
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
+; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
+; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k2
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: var_rotate_v64i8:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vmovdqu8 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
+; AVX512VLBW-NEXT: vpsllw $2, %zmm3, %zmm4
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
+; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm1
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
+; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k2
+; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512VLBW-NEXT: vpsrlw $2, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT: retq
+ %b8 = sub <64 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
+ %shl = shl <64 x i8> %a, %b
+ %lshr = lshr <64 x i8> %a, %b8
+ %or = or <64 x i8> %shl, %lshr
+ ret <64 x i8> %or
+}
+
+;
+; Constant Rotates
+;
+
+define <8 x i64> @constant_rotate_v8i64(<8 x i64> %a) nounwind {
+; AVX512-LABEL: constant_rotate_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprolvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %shl = shl <8 x i64> %a, <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>
+ %lshr = lshr <8 x i64> %a, <i64 60, i64 50, i64 14, i64 4, i64 60, i64 50, i64 14, i64 4>
+ %or = or <8 x i64> %shl, %lshr
+ ret <8 x i64> %or
+}
+
+define <16 x i32> @constant_rotate_v16i32(<16 x i32> %a) nounwind {
+; AVX512-LABEL: constant_rotate_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprolvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %shl = shl <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ %lshr = lshr <16 x i32> %a, <i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21>
+ %or = or <16 x i32> %shl, %lshr
+ ret <16 x i32> %or
+}
+
+define <32 x i16> @constant_rotate_v32i16(<32 x i16> %a) nounwind {
+; AVX512F-LABEL: constant_rotate_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm3
+; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm4 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512F-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpsrlvd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: constant_rotate_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512VL-NEXT: vpmullw %ymm2, %ymm1, %ymm3
+; AVX512VL-NEXT: vpmullw %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vmovdqa32 {{.*#+}} zmm4 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512VL-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
+; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpsrlvd %zmm4, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: constant_rotate_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: constant_rotate_v32i16:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT: retq
+ %shl = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %lshr = lshr <32 x i16> %a, <i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1>
+ %or = or <32 x i16> %shl, %lshr
+ ret <32 x i16> %or
+}
+
+define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
+; AVX512F-LABEL: constant_rotate_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm2, %ymm5
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm7
+; AVX512F-NEXT: vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm5
+; AVX512F-NEXT: vpaddb %ymm7, %ymm7, %ymm8
+; AVX512F-NEXT: vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm5
+; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm3
+; AVX512F-NEXT: vpsllw $2, %ymm3, %ymm4
+; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm8, %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
+; AVX512F-NEXT: vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512F-NEXT: vpblendvb %ymm8, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm8, %ymm8, %ymm10
+; AVX512F-NEXT: vpblendvb %ymm10, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: constant_rotate_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm5
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm7
+; AVX512VL-NEXT: vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm5
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm7, %ymm8
+; AVX512VL-NEXT: vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm5
+; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm3
+; AVX512VL-NEXT: vpsllw $2, %ymm3, %ymm4
+; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
+; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm8, %ymm8, %ymm10
+; AVX512VL-NEXT: vpblendvb %ymm10, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm10, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: constant_rotate_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vpsllw $2, %zmm2, %zmm3
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm3
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm3
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm3
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: constant_rotate_v64i8:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm2
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
+; AVX512VLBW-NEXT: vpsllw $2, %zmm2, %zmm3
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512VLBW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm3
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLBW-NEXT: vpsrlw $2, %zmm0, %zmm3
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLBW-NEXT: vpsrlw $1, %zmm0, %zmm3
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm2, %zmm0
+; AVX512VLBW-NEXT: retq
+ %shl = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
+ %lshr = lshr <64 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
+ %or = or <64 x i8> %shl, %lshr
+ ret <64 x i8> %or
+}
+
+;
+; Uniform Constant Rotates
+;
+
+define <8 x i64> @splatconstant_rotate_v8i64(<8 x i64> %a) nounwind {
+; AVX512-LABEL: splatconstant_rotate_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprolq $14, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %shl = shl <8 x i64> %a, <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>
+ %lshr = lshr <8 x i64> %a, <i64 50, i64 50, i64 50, i64 50, i64 50, i64 50, i64 50, i64 50>
+ %or = or <8 x i64> %shl, %lshr
+ ret <8 x i64> %or
+}
+
+define <16 x i32> @splatconstant_rotate_v16i32(<16 x i32> %a) nounwind {
+; AVX512-LABEL: splatconstant_rotate_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprold $4, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %shl = shl <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ %lshr = lshr <16 x i32> %a, <i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28>
+ %or = or <16 x i32> %shl, %lshr
+ ret <16 x i32> %or
+}
+
+define <32 x i16> @splatconstant_rotate_v32i16(<32 x i16> %a) nounwind {
+; AVX512F-LABEL: splatconstant_rotate_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpsllw $7, %ymm1, %ymm2
+; AVX512F-NEXT: vpsllw $7, %ymm0, %ymm3
+; AVX512F-NEXT: vpsrlw $9, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT: vpsrlw $9, %ymm0, %ymm0
+; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm3
+; AVX512VL-NEXT: vpsrlw $9, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $9, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_rotate_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlw $9, %zmm0, %zmm0
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: splatconstant_rotate_v32i16:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vpsllw $7, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlw $9, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT: retq
+ %shl = shl <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ %lshr = lshr <32 x i16> %a, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+ %or = or <32 x i16> %shl, %lshr
+ ret <32 x i16> %or
+}
+
+define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
+; AVX512F-LABEL: splatconstant_rotate_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_rotate_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: splatconstant_rotate_v64i8:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT: retq
+ %shl = shl <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
+ %lshr = lshr <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
+ %or = or <64 x i8> %shl, %lshr
+ ret <64 x i8> %or
+}
+
+;
+; Masked Uniform Constant Rotates
+;
+
+define <8 x i64> @splatconstant_rotate_mask_v8i64(<8 x i64> %a) nounwind {
+; AVX512-LABEL: splatconstant_rotate_mask_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprolq $15, %zmm0, %zmm0
+; AVX512-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %shl = shl <8 x i64> %a, <i64 15, i64 15, i64 15, i64 15, i64 15, i64 15, i64 15, i64 15>
+ %lshr = lshr <8 x i64> %a, <i64 49, i64 49, i64 49, i64 49, i64 49, i64 49, i64 49, i64 49>
+ %rmask = and <8 x i64> %lshr, <i64 255, i64 127, i64 127, i64 255, i64 255, i64 127, i64 127, i64 255>
+ %lmask = and <8 x i64> %shl, <i64 33, i64 65, i64 129, i64 257, i64 33, i64 65, i64 129, i64 257>
+ %or = or <8 x i64> %lmask, %rmask
+ ret <8 x i64> %or
+}
+
+define <16 x i32> @splatconstant_rotate_mask_v16i32(<16 x i32> %a) nounwind {
+; AVX512-LABEL: splatconstant_rotate_mask_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vprold $4, %zmm0, %zmm0
+; AVX512-NEXT: vpandd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %shl = shl <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ %lshr = lshr <16 x i32> %a, <i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28>
+ %rmask = and <16 x i32> %lshr, <i32 3, i32 7, i32 15, i32 31, i32 63, i32 127, i32 255, i32 511, i32 3, i32 7, i32 15, i32 31, i32 63, i32 127, i32 255, i32 511>
+ %lmask = and <16 x i32> %shl, <i32 511, i32 255, i32 127, i32 63, i32 31, i32 15, i32 7, i32 3, i32 511, i32 255, i32 127, i32 63, i32 31, i32 15, i32 7, i32 3>
+ %or = or <16 x i32> %lmask, %rmask
+ ret <16 x i32> %or
+}
+
+define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
+; AVX512F-LABEL: splatconstant_rotate_mask_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpsllw $5, %ymm0, %ymm2
+; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm3
+; AVX512F-NEXT: vpsrlw $11, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $11, %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33]
+; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_mask_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllw $5, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm3
+; AVX512VL-NEXT: vpsrlw $11, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $11, %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33]
+; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_rotate_mask_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpsllw $5, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlw $11, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: splatconstant_rotate_mask_v32i16:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vpsllw $5, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlw $11, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT: retq
+ %shl = shl <32 x i16> %a, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %lshr = lshr <32 x i16> %a, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
+ %rmask = and <32 x i16> %lshr, <i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55>
+ %lmask = and <32 x i16> %shl, <i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33>
+ %or = or <32 x i16> %lmask, %rmask
+ ret <32 x i16> %or
+}
+
+define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
+; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33]
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33]
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_rotate_mask_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VLBW-LABEL: splatconstant_rotate_mask_v64i8:
+; AVX512VLBW: # BB#0:
+; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT: retq
+ %shl = shl <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
+ %lshr = lshr <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
+ %rmask = and <64 x i8> %lshr, <i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55>
+ %lmask = and <64 x i8> %shl, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
+ %or = or <64 x i8> %lmask, %rmask
+ ret <64 x i8> %or
+}
diff --git a/test/CodeGen/X86/vector-shift-ashr-256.ll b/test/CodeGen/X86/vector-shift-ashr-256.ll
index 09e143ddcd4d6..5f2b18fc9c03a 100644
--- a/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -45,7 +45,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX2-LABEL: var_shift_v4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
@@ -66,7 +66,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; XOPAVX2-LABEL: var_shift_v4i64:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
@@ -667,7 +667,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX2-LABEL: splatvar_shift_v4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -687,7 +687,7 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -1700,7 +1700,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; XOPAVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [72057594037927936,72057594037927936,72057594037927936,72057594037927936]
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index 820178d2d9927..5f00e55e225ba 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -745,7 +745,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512CDVL-NEXT: vplzcntd %xmm0, %xmm0
-; AVX512CDVL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX512CDVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [31,31,31,31]
; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX512CDVL-NEXT: retq
;
@@ -755,7 +755,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512CD-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX512CD-NEXT: vpbroadcastd {{.*#+}} xmm1 = [31,31,31,31]
; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index 30e5661d54859..4a7d25c1376e5 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -179,7 +179,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512CDVL-NEXT: vplzcntq %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX512CDVL-NEXT: vpbroadcastq {{.*#+}} ymm1 = [63,63,63,63]
; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; AVX512CDVL-NEXT: retq
;
@@ -189,7 +189,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512CD-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX512CD-NEXT: vpbroadcastq {{.*#+}} ymm1 = [63,63,63,63]
; AVX512CD-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; AVX512CD-NEXT: retq
;
@@ -432,7 +432,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512CDVL-NEXT: vplzcntd %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX512CDVL-NEXT: vpbroadcastd {{.*#+}} ymm1 = [31,31,31,31,31,31,31,31]
; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm0
; AVX512CDVL-NEXT: retq
;
@@ -442,7 +442,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512CD-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX512CD-NEXT: vpbroadcastd {{.*#+}} ymm1 = [31,31,31,31,31,31,31,31]
; AVX512CD-NEXT: vpsubd %ymm0, %ymm1, %ymm0
; AVX512CD-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-tzcnt-512.ll b/test/CodeGen/X86/vector-tzcnt-512.ll
index 3bf677aadf195..2fce8a6019313 100644
--- a/test/CodeGen/X86/vector-tzcnt-512.ll
+++ b/test/CodeGen/X86/vector-tzcnt-512.ll
@@ -89,7 +89,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: vpbroadcastq {{.*}}(%rip), %zmm1
+; AVX512CD-NEXT: vpbroadcastq {{.*#+}} zmm1 = [63,63,63,63,63,63,63,63]
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm0
; AVX512CD-NEXT: retq
;
@@ -99,7 +99,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpbroadcastq {{.*}}(%rip), %zmm1
+; AVX512CDBW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [63,63,63,63,63,63,63,63]
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm0
; AVX512CDBW-NEXT: retq
;
@@ -235,7 +235,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandd %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %zmm1
+; AVX512CD-NEXT: vpbroadcastd {{.*#+}} zmm1 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm0
; AVX512CD-NEXT: retq
;
@@ -245,7 +245,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandd %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpbroadcastd {{.*}}(%rip), %zmm1
+; AVX512CDBW-NEXT: vpbroadcastd {{.*#+}} zmm1 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm0
; AVX512CDBW-NEXT: retq
;
diff --git a/test/CodeGen/X86/vselect-avx.ll b/test/CodeGen/X86/vselect-avx.ll
index 5503cfc357e52..5825a56b6f99b 100644
--- a/test/CodeGen/X86/vselect-avx.ll
+++ b/test/CodeGen/X86/vselect-avx.ll
@@ -58,8 +58,8 @@ define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: movq (%rdi,%rsi,8), %rax
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-0.5,-0.5,-0.5,-0.5]
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.5,0.5,0.5,0.5]
; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: vmovupd %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
@@ -108,7 +108,7 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
;
; AVX2-LABEL: test3:
; AVX2: ## BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm3
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuldq %xmm4, %xmm5, %xmm4
@@ -117,7 +117,7 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
; AVX2-NEXT: vpsrld $31, %xmm3, %xmm4
; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3]
; AVX2-NEXT: vpmulld %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
diff --git a/test/CodeGen/X86/widen_arith-2.ll b/test/CodeGen/X86/widen_arith-2.ll
index 48753ad4fd762..5731b63f3bc14 100644
--- a/test/CodeGen/X86/widen_arith-2.ll
+++ b/test/CodeGen/X86/widen_arith-2.ll
@@ -16,20 +16,17 @@ define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; CHECK-NEXT: .LBB0_2: # %forbody
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: movl (%esp), %eax
-; CHECK-NEXT: shll $3, %eax
-; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl (%esp), %eax
-; CHECK-NEXT: shll $3, %eax
-; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl (%esp), %ecx
+; CHECK-NEXT: leal (,%eax,8), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl %ecx, %edx
+; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; CHECK-NEXT: pmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; CHECK-NEXT: psubw %xmm0, %xmm3
; CHECK-NEXT: pand %xmm1, %xmm3
; CHECK-NEXT: pshufb %xmm2, %xmm3
-; CHECK-NEXT: movq %xmm3, (%edx,%ecx,8)
+; CHECK-NEXT: movq %xmm3, (%edx,%eax,8)
; CHECK-NEXT: incl (%esp)
; CHECK-NEXT: .LBB0_1: # %forcond
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/test/CodeGen/X86/widen_cast-4.ll b/test/CodeGen/X86/widen_cast-4.ll
index e55d62a461aa5..cc6fb27a62938 100644
--- a/test/CodeGen/X86/widen_cast-4.ll
+++ b/test/CodeGen/X86/widen_cast-4.ll
@@ -16,22 +16,19 @@ define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; NARROW-NEXT: .LBB0_2: # %forbody
; NARROW-NEXT: # in Loop: Header=BB0_1 Depth=1
; NARROW-NEXT: movl (%esp), %eax
-; NARROW-NEXT: shll $3, %eax
-; NARROW-NEXT: addl {{[0-9]+}}(%esp), %eax
-; NARROW-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; NARROW-NEXT: movl (%esp), %eax
-; NARROW-NEXT: shll $3, %eax
-; NARROW-NEXT: addl {{[0-9]+}}(%esp), %eax
-; NARROW-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; NARROW-NEXT: movl (%esp), %ecx
+; NARROW-NEXT: leal (,%eax,8), %ecx
; NARROW-NEXT: movl {{[0-9]+}}(%esp), %edx
+; NARROW-NEXT: addl %ecx, %edx
+; NARROW-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; NARROW-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; NARROW-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; NARROW-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; NARROW-NEXT: psubw %xmm0, %xmm2
; NARROW-NEXT: psllw $8, %xmm2
; NARROW-NEXT: psraw $8, %xmm2
; NARROW-NEXT: psraw $2, %xmm2
; NARROW-NEXT: pshufb %xmm1, %xmm2
-; NARROW-NEXT: movq %xmm2, (%edx,%ecx,8)
+; NARROW-NEXT: movq %xmm2, (%edx,%eax,8)
; NARROW-NEXT: incl (%esp)
; NARROW-NEXT: .LBB0_1: # %forcond
; NARROW-NEXT: # =>This Inner Loop Header: Depth=1
@@ -54,24 +51,21 @@ define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; WIDE-NEXT: .LBB0_2: # %forbody
; WIDE-NEXT: # in Loop: Header=BB0_1 Depth=1
; WIDE-NEXT: movl (%esp), %eax
-; WIDE-NEXT: shll $3, %eax
-; WIDE-NEXT: addl {{[0-9]+}}(%esp), %eax
-; WIDE-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIDE-NEXT: movl (%esp), %eax
-; WIDE-NEXT: shll $3, %eax
-; WIDE-NEXT: addl {{[0-9]+}}(%esp), %eax
-; WIDE-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIDE-NEXT: movl (%esp), %ecx
+; WIDE-NEXT: leal (,%eax,8), %ecx
; WIDE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIDE-NEXT: addl %ecx, %edx
+; WIDE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIDE-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; WIDE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; WIDE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; WIDE-NEXT: pinsrd $1, 4(%eax,%ecx,8), %xmm3
+; WIDE-NEXT: pinsrd $1, 4(%ecx,%eax,8), %xmm3
; WIDE-NEXT: psubb %xmm0, %xmm3
; WIDE-NEXT: psrlw $2, %xmm3
; WIDE-NEXT: pand %xmm1, %xmm3
; WIDE-NEXT: pxor %xmm2, %xmm3
; WIDE-NEXT: psubb %xmm2, %xmm3
-; WIDE-NEXT: pextrd $1, %xmm3, 4(%edx,%ecx,8)
-; WIDE-NEXT: movd %xmm3, (%edx,%ecx,8)
+; WIDE-NEXT: pextrd $1, %xmm3, 4(%edx,%eax,8)
+; WIDE-NEXT: movd %xmm3, (%edx,%eax,8)
; WIDE-NEXT: incl (%esp)
; WIDE-NEXT: .LBB0_1: # %forcond
; WIDE-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/test/CodeGen/X86/win64-nosse-csrs.ll b/test/CodeGen/X86/win64-nosse-csrs.ll
index d1860b721044c..29d4f165392e3 100644
--- a/test/CodeGen/X86/win64-nosse-csrs.ll
+++ b/test/CodeGen/X86/win64-nosse-csrs.ll
@@ -20,7 +20,7 @@ entry-block:
}
; Function Attrs: nounwind uwtable
-define x86_64_win64cc i64 @peach() unnamed_addr #1 {
+define win64cc i64 @peach() unnamed_addr #1 {
entry-block:
%0 = call i64 @banana()
ret i64 %0
diff --git a/test/CodeGen/X86/win64_nonvol.ll b/test/CodeGen/X86/win64_nonvol.ll
index 8e5f6cec1ab70..e1c615d75f282 100644
--- a/test/CodeGen/X86/win64_nonvol.ll
+++ b/test/CodeGen/X86/win64_nonvol.ll
@@ -5,7 +5,7 @@
; Win64 nonvolatile registers get saved.
; CHECK-LABEL: bar:
-define x86_64_win64cc void @bar(i32 %a, i32 %b) {
+define win64cc void @bar(i32 %a, i32 %b) {
; CHECK-DAG: pushq %rdi
; CHECK-DAG: pushq %rsi
; CHECK-DAG: movaps %xmm6,
diff --git a/test/CodeGen/X86/win64_params.ll b/test/CodeGen/X86/win64_params.ll
index a0b552d4d5847..6b42735120137 100644
--- a/test/CodeGen/X86/win64_params.ll
+++ b/test/CodeGen/X86/win64_params.ll
@@ -12,7 +12,7 @@ entry:
ret i32 %add
}
-define x86_64_win64cc i32 @f7(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6) nounwind readnone optsize {
+define win64cc i32 @f7(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6) nounwind readnone optsize {
entry:
; CHECK: movl 48(%rsp), %eax
; CHECK: addl 40(%rsp), %eax
diff --git a/test/CodeGen/X86/win_chkstk.ll b/test/CodeGen/X86/win_chkstk.ll
index 0faa24ef7290d..c7550a467a352 100644
--- a/test/CodeGen/X86/win_chkstk.ll
+++ b/test/CodeGen/X86/win_chkstk.ll
@@ -51,7 +51,7 @@ entry:
; Make sure we don't call __chkstk or __alloca on non-Windows even if the
; caller has the Win64 calling convention.
-define x86_64_win64cc i32 @main4k_win64() nounwind {
+define win64cc i32 @main4k_win64() nounwind {
entry:
; WIN_X32: calll __chkstk
; WIN_X64: callq __chkstk
diff --git a/test/CodeGen/X86/win_coreclr_chkstk.ll b/test/CodeGen/X86/win_coreclr_chkstk.ll
index c9a5fc2b32884..b4b8010ec564e 100644
--- a/test/CodeGen/X86/win_coreclr_chkstk.ll
+++ b/test/CodeGen/X86/win_coreclr_chkstk.ll
@@ -103,7 +103,7 @@ entry:
; Make sure we don't emit the probe sequence if not on windows even if the
; caller has the Win64 calling convention.
-define x86_64_win64cc i32 @main4k_win64() nounwind {
+define win64cc i32 @main4k_win64() nounwind {
entry:
; WIN_X64: movq %gs:16, %rcx
; LINUX-NOT: movq %gs:16, %rcx
@@ -115,7 +115,7 @@ entry:
declare i32 @bar(i8*) nounwind
; Within-body inline probe expansion
-define x86_64_win64cc i32 @main4k_alloca(i64 %n) nounwind {
+define win64cc i32 @main4k_alloca(i64 %n) nounwind {
entry:
; WIN_X64: callq bar
; WIN_X64: movq %gs:16, [[R:%r.*]]
diff --git a/test/CodeGen/X86/x86-64-ms_abi-vararg.ll b/test/CodeGen/X86/x86-64-ms_abi-vararg.ll
index 299190e8a595e..e3387a2709cba 100644
--- a/test/CodeGen/X86/x86-64-ms_abi-vararg.ll
+++ b/test/CodeGen/X86/x86-64-ms_abi-vararg.ll
@@ -3,7 +3,7 @@
; Verify that the var arg parameters which are passed in registers are stored
; in home stack slots allocated by the caller and that AP is correctly
; calculated.
-define x86_64_win64cc void @average_va(i32 %count, ...) nounwind {
+define win64cc void @average_va(i32 %count, ...) nounwind {
entry:
; CHECK: pushq
; CHECK: movq %r9, 40(%rsp)
@@ -24,7 +24,7 @@ declare void @llvm.va_end(i8*) nounwind
; CHECK-LABEL: f5:
; CHECK: pushq
; CHECK: leaq 56(%rsp),
-define x86_64_win64cc i8** @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
+define win64cc i8** @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
entry:
%ap = alloca i8*, align 8
%ap.0 = bitcast i8** %ap to i8*
@@ -35,7 +35,7 @@ entry:
; CHECK-LABEL: f4:
; CHECK: pushq
; CHECK: leaq 48(%rsp),
-define x86_64_win64cc i8** @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+define win64cc i8** @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
%ap = alloca i8*, align 8
%ap.0 = bitcast i8** %ap to i8*
@@ -46,7 +46,7 @@ entry:
; CHECK-LABEL: f3:
; CHECK: pushq
; CHECK: leaq 40(%rsp),
-define x86_64_win64cc i8** @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
+define win64cc i8** @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
entry:
%ap = alloca i8*, align 8
%ap.0 = bitcast i8** %ap to i8*
@@ -62,7 +62,7 @@ entry:
; CHECK: movq [[REG_copy1]], 8(%rsp)
; CHECK: movq [[REG_copy1]], (%rsp)
; CHECK: ret
-define x86_64_win64cc void @copy1(i64 %a0, ...) nounwind {
+define win64cc void @copy1(i64 %a0, ...) nounwind {
entry:
%ap = alloca i8*, align 8
%cp = alloca i8*, align 8
@@ -78,7 +78,7 @@ entry:
; CHECK: movq [[REG_copy4]], 8(%rsp)
; CHECK: movq [[REG_copy4]], (%rsp)
; CHECK: ret
-define x86_64_win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+define win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
%ap = alloca i8*, align 8
%cp = alloca i8*, align 8
@@ -96,7 +96,7 @@ entry:
; CHECK: movq [[REG_arg4_2]], (%rsp)
; CHECK: movl 48(%rsp), %eax
; CHECK: ret
-define x86_64_win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+define win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
%ap = alloca i8*, align 8
%ap.0 = bitcast i8** %ap to i8*
diff --git a/test/CodeGen/X86/x86-cmov-converter.ll b/test/CodeGen/X86/x86-cmov-converter.ll
new file mode 100644
index 0000000000000..39877c14429ff
--- /dev/null
+++ b/test/CodeGen/X86/x86-cmov-converter.ll
@@ -0,0 +1,321 @@
+; RUN: llc -mtriple=x86_64-pc-linux -x86-cmov-converter=true -verify-machineinstrs < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; This test checks that the x86-cmov-converter optimization transforms CMOV
+;; instructions into branches when doing so is profitable.
+;; There are 5 cases below:
+;; 1. CmovInHotPath:
+;;      The CMOV depends on the condition and lies on the hot path.
+;;      Thus, it is worth transforming.
+;;
+;; 2. CmovNotInHotPath:
+;;      Similar to (1), except that the CMOV is not on the hot path.
+;;      Thus, it is not worth transforming.
+;;
+;; 3. MaxIndex:
+;;      A maximum-search algorithm that looks for the index of the maximum;
+;;      calculating the CMOV value is cheaper than calculating the CMOV
+;;      condition. Thus, it is worth transforming.
+;;
+;; 4. MaxValue:
+;;      A maximum-search algorithm that looks for the maximum value;
+;;      calculating the CMOV value is not cheaper than calculating the CMOV
+;;      condition. Thus, it is not worth transforming.
+;;
+;; 5. BinarySearch:
+;;      The CMOV in a binary search usually cannot be predicted well.
+;;      Thus, it is not worth transforming.
+;;
+;; The test was created using the following command line:
+;; > clang -S -O2 -m64 -fno-vectorize -fno-unroll-loops -emit-llvm foo.c -o -
+;; Where foo.c is:
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;void CmovInHotPath(int n, int a, int b, int *c, int *d) {
+;; for (int i = 0; i < n; i++) {
+;; int t = c[i];
+;; if (c[i] * a > b)
+;; t = 10;
+;; c[i] = t;
+;; }
+;;}
+;;
+;;
+;;void CmovNotInHotPath(int n, int a, int b, int *c, int *d) {
+;; for (int i = 0; i < n; i++) {
+;; int t = c[i];
+;; if (c[i] * a > b)
+;; t = 10;
+;; c[i] = t;
+;; d[i] /= b;
+;; }
+;;}
+;;
+;;
+;;int MaxIndex(int n, int *a) {
+;; int t = 0;
+;; for (int i = 1; i < n; i++) {
+;; if (a[i] > a[t])
+;; t = i;
+;; }
+;; return a[t];
+;;}
+;;
+;;
+;;int MaxValue(int n, int *a) {
+;; int t = a[0];
+;; for (int i = 1; i < n; i++) {
+;; if (a[i] > t)
+;; t = a[i];
+;; }
+;; return t;
+;;}
+;;
+;;typedef struct Node Node;
+;;struct Node {
+;; unsigned Val;
+;; Node *Right;
+;; Node *Left;
+;;};
+;;
+;;unsigned BinarySearch(unsigned Mask, Node *Curr, Node *Next) {
+;; while (Curr->Val > Next->Val) {
+;; Curr = Next;
+;; if (Mask & (0x1 << Curr->Val))
+;; Next = Curr->Right;
+;; else
+;; Next = Curr->Left;
+;; }
+;; return Curr->Val;
+;;}
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%struct.Node = type { i32, %struct.Node*, %struct.Node* }
+
+; CHECK-LABEL: CmovInHotPath
+; CHECK-NOT: cmov
+; CHECK: jg
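+;
+; A minimal sketch of the converted loop body this is expected to yield
+; (register assignments are assumed, not part of the source; only the
+; presence of jg and the absence of cmov are verified above):
+;   imull %esi, %ecx        ; t = c[i] * a
+;   cmpl  %edx, %ecx        ; t > b ?
+;   jg    .LBB0_taken       ; branch replaces cmovg; taken block stores 10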
+
+define void @CmovInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture readnone %d) #0 {
+entry:
+ %cmp14 = icmp sgt i32 %n, 0
+ br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %mul = mul nsw i32 %0, %a
+ %cmp3 = icmp sgt i32 %mul, %b
+ %. = select i1 %cmp3, i32 10, i32 %0
+ store i32 %., i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK-LABEL: CmovNotInHotPath
+; CHECK: cmovg
+
+define void @CmovNotInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture %d) #0 {
+entry:
+ %cmp18 = icmp sgt i32 %n, 0
+ br i1 %cmp18, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %mul = mul nsw i32 %0, %a
+ %cmp3 = icmp sgt i32 %mul, %b
+ %. = select i1 %cmp3, i32 10, i32 %0
+ store i32 %., i32* %arrayidx, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx7, align 4
+ %div = sdiv i32 %1, %b
+ store i32 %div, i32* %arrayidx7, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK-LABEL: MaxIndex
+; CHECK-NOT: cmov
+; CHECK: jg
+
+define i32 @MaxIndex(i32 %n, i32* nocapture readonly %a) #0 {
+entry:
+ %cmp14 = icmp sgt i32 %n, 1
+ br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ %phitmp = sext i32 %i.0.t.0 to i64
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ %t.0.lcssa = phi i64 [ 0, %entry ], [ %phitmp, %for.cond.cleanup.loopexit ]
+ %arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %t.0.lcssa
+ %0 = load i32, i32* %arrayidx5, align 4
+ ret i32 %0
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
+ %t.015 = phi i32 [ %i.0.t.0, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx, align 4
+ %idxprom1 = sext i32 %t.015 to i64
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
+ %2 = load i32, i32* %arrayidx2, align 4
+ %cmp3 = icmp sgt i32 %1, %2
+ %3 = trunc i64 %indvars.iv to i32
+ %i.0.t.0 = select i1 %cmp3, i32 %3, i32 %t.015
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; CHECK-LABEL: MaxValue
+; CHECK-NOT: jg
+; CHECK: cmovg
+
+define i32 @MaxValue(i32 %n, i32* nocapture readonly %a) #0 {
+entry:
+ %0 = load i32, i32* %a, align 4
+ %cmp13 = icmp sgt i32 %n, 1
+ br i1 %cmp13, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ %t.0.lcssa = phi i32 [ %0, %entry ], [ %.t.0, %for.body ]
+ ret i32 %t.0.lcssa
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
+ %t.014 = phi i32 [ %.t.0, %for.body ], [ %0, %for.body.preheader ]
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx1, align 4
+ %cmp2 = icmp sgt i32 %1, %t.014
+ %.t.0 = select i1 %cmp2, i32 %1, i32 %t.014
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK-LABEL: BinarySearch
+; CHECK: cmov
+
+define i32 @BinarySearch(i32 %Mask, %struct.Node* nocapture readonly %Curr, %struct.Node* nocapture readonly %Next) #0 {
+entry:
+ %Val8 = getelementptr inbounds %struct.Node, %struct.Node* %Curr, i64 0, i32 0
+ %0 = load i32, i32* %Val8, align 8
+ %Val19 = getelementptr inbounds %struct.Node, %struct.Node* %Next, i64 0, i32 0
+ %1 = load i32, i32* %Val19, align 8
+ %cmp10 = icmp ugt i32 %0, %1
+ br i1 %cmp10, label %while.body, label %while.end
+
+while.body: ; preds = %entry, %while.body
+ %2 = phi i32 [ %4, %while.body ], [ %1, %entry ]
+ %Next.addr.011 = phi %struct.Node* [ %3, %while.body ], [ %Next, %entry ]
+ %shl = shl i32 1, %2
+ %and = and i32 %shl, %Mask
+ %tobool = icmp eq i32 %and, 0
+ %Left = getelementptr inbounds %struct.Node, %struct.Node* %Next.addr.011, i64 0, i32 2
+ %Right = getelementptr inbounds %struct.Node, %struct.Node* %Next.addr.011, i64 0, i32 1
+ %Left.sink = select i1 %tobool, %struct.Node** %Left, %struct.Node** %Right
+ %3 = load %struct.Node*, %struct.Node** %Left.sink, align 8
+ %Val1 = getelementptr inbounds %struct.Node, %struct.Node* %3, i64 0, i32 0
+ %4 = load i32, i32* %Val1, align 8
+ %cmp = icmp ugt i32 %2, %4
+ br i1 %cmp, label %while.body, label %while.end
+
+while.end: ; preds = %while.body, %entry
+ %.lcssa = phi i32 [ %0, %entry ], [ %2, %while.body ]
+ ret i32 %.lcssa
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; The following test checks that the x86-cmov-converter optimization
+;; transforms CMOV instructions into branches correctly.
+;;
+;; MBB:
+;; cond = cmp ...
+;; v1 = CMOVgt t1, f1, cond
+;; v2 = CMOVle s1, f2, cond
+;;
+;; Where: t1 = 11, f1 = 22, f2 = a
+;;
+;; After CMOV transformation
+;; -------------------------
+;; MBB:
+;; cond = cmp ...
+;; ja %SinkMBB
+;;
+;; FalseMBB:
+;; jmp %SinkMBB
+;;
+;; SinkMBB:
+;; %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
+;; %v2 = phi[%f1, %FalseMBB], [%f2, %MBB] ; For a CMOV whose condition code
+;;                                         ; is the opposite (OppCC), switch
+;;                                         ; the true value with the false
+;;                                         ; value; a PHI instruction cannot
+;;                                         ; use a previous PHI's result.
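+;;
+;; As a concrete (hand-derived) instance using the values above, t1 = 11,
+;; f1 = 22, f2 = a, the SinkMBB PHIs are expected to be, schematically:
+;;   %v1 = phi [ 22, %FalseMBB ], [ 11, %MBB ]
+;;   %v2 = phi [ 22, %FalseMBB ], [ %a, %MBB ]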
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; CHECK-LABEL: Transform
+; CHECK-NOT: cmov
+; CHECK: divl [[a:%[0-9a-z]*]]
+; CHECK: cmpl [[a]], %eax
+; CHECK: movl $11, [[s1:%[0-9a-z]*]]
+; CHECK: movl [[a]], [[s2:%[0-9a-z]*]]
+; CHECK: ja [[SinkBB:.*]]
+; CHECK: [[FalseBB:.*]]:
+; CHECK: movl $22, [[s1]]
+; CHECK: movl $22, [[s2]]
+; CHECK: [[SinkBB]]:
+; CHECK: ja
+
+define void @Transform(i32 *%arr, i32 *%arr2, i32 %a, i32 %b, i32 %c, i32 %n) #0 {
+entry:
+ %cmp10 = icmp ugt i32 0, %n
+ br i1 %cmp10, label %while.body, label %while.end
+
+while.body: ; preds = %entry, %while.body
+ %i = phi i32 [ %i_inc, %while.body ], [ 0, %entry ]
+ %arr_i = getelementptr inbounds i32, i32* %arr, i32 %i
+ %x = load i32, i32* %arr_i, align 4
+ %div = udiv i32 %x, %a
+ %cond = icmp ugt i32 %div, %a
+ %condOpp = icmp ule i32 %div, %a
+ %s1 = select i1 %cond, i32 11, i32 22
+ %s2 = select i1 %condOpp, i32 %s1, i32 %a
+ %sum = urem i32 %s1, %s2
+ store i32 %sum, i32* %arr_i, align 4
+ %i_inc = add i32 %i, 1
+ %cmp = icmp ugt i32 %i_inc, %n
+ br i1 %cmp, label %while.body, label %while.end
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+attributes #0 = {"target-cpu"="x86-64"}
diff --git a/test/CodeGen/XCore/varargs.ll b/test/CodeGen/XCore/varargs.ll
index 2e364b275610c..b6f716d66c9df 100644
--- a/test/CodeGen/XCore/varargs.ll
+++ b/test/CodeGen/XCore/varargs.ll
@@ -26,10 +26,10 @@ entry:
; CHECK-LABEL: test_vararg
; CHECK: extsp 6
; CHECK: stw lr, sp[1]
-; CHECK: stw r3, sp[6]
-; CHECK: stw r0, sp[3]
-; CHECK: stw r1, sp[4]
-; CHECK: stw r2, sp[5]
+; CHECK-DAG: stw r3, sp[6]
+; CHECK-DAG: stw r0, sp[3]
+; CHECK-DAG: stw r1, sp[4]
+; CHECK-DAG: stw r2, sp[5]
; CHECK: ldaw r0, sp[3]
; CHECK: stw r0, sp[2]
%list = alloca i8*, align 4
diff --git a/test/DebugInfo/Generic/namespace.ll b/test/DebugInfo/Generic/namespace.ll
index 983e20db4111c..5a8f652631922 100644
--- a/test/DebugInfo/Generic/namespace.ll
+++ b/test/DebugInfo/Generic/namespace.ll
@@ -54,17 +54,14 @@
; CHECK-NOT: NULL
; CHECK: DW_TAG_imported_module
-; This is a bug, it should be in F2 but it inherits the file from its
-; enclosing scope
-; CHECK-NEXT: DW_AT_decl_file{{.*}}stdin
+; CHECK-NEXT: DW_AT_decl_file{{.*}}([[F2:.*]])
; CHECK-NEXT: DW_AT_decl_line{{.*}}(15)
; CHECK-NEXT: DW_AT_import{{.*}}=> {[[NS2]]})
; CHECK: NULL
; CHECK-NOT: NULL
; CHECK: DW_TAG_imported_module
-; Same bug as above, this should be F2
-; CHECK-NEXT: DW_AT_decl_file{{.*}}debug-info-namespace.cpp
+; CHECK-NEXT: DW_AT_decl_file{{.*}}([[F2:.*]])
; CHECK-NEXT: DW_AT_decl_line{{.*}}(18)
; CHECK-NEXT: DW_AT_import{{.*}}=> {[[NS1]]})
; CHECK-NOT: NULL
@@ -320,29 +317,29 @@ attributes #1 = { nounwind readnone }
!31 = !DIGlobalVariable(name: "i", linkageName: "_ZN1A1B1iE", line: 20, isLocal: false, isDefinition: true, scope: !6, file: !18, type: !13)
!32 = !DIGlobalVariable(name: "var_fwd", linkageName: "_ZN1A1B7var_fwdE", line: 44, isLocal: false, isDefinition: true, scope: !6, file: !18, type: !13)
!33 = !{!34, !35, !36, !37, !40, !41, !42, !43, !44, !45, !47, !48, !49, !51, !54, !55, !56}
-!34 = !DIImportedEntity(tag: DW_TAG_imported_module, line: 15, scope: !7, entity: !6)
-!35 = !DIImportedEntity(tag: DW_TAG_imported_module, line: 18, scope: !0, entity: !7)
-!36 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 19, name: "E", scope: !0, entity: !7)
-!37 = !DIImportedEntity(tag: DW_TAG_imported_module, line: 23, scope: !38, entity: !6)
+!34 = !DIImportedEntity(tag: DW_TAG_imported_module, file: !5, line: 15, scope: !7, entity: !6)
+!35 = !DIImportedEntity(tag: DW_TAG_imported_module, file: !5, line: 18, scope: !0, entity: !7)
+!36 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 19, name: "E", scope: !0, entity: !7)
+!37 = !DIImportedEntity(tag: DW_TAG_imported_module, file: !5, line: 23, scope: !38, entity: !6)
!38 = distinct !DILexicalBlock(line: 22, column: 10, file: !5, scope: !39)
!39 = distinct !DILexicalBlock(line: 22, column: 7, file: !5, scope: !21)
-!40 = !DIImportedEntity(tag: DW_TAG_imported_module, line: 26, scope: !21, entity: !7)
-!41 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 27, scope: !21, entity: !4)
-!42 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 28, scope: !21, entity: !8)
-!43 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 29, scope: !21, entity: !14)
-!44 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 30, scope: !21, entity: !31)
-!45 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 31, scope: !21, entity: !46)
+!40 = !DIImportedEntity(tag: DW_TAG_imported_module, file: !5, line: 26, scope: !21, entity: !7)
+!41 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 27, scope: !21, entity: !4)
+!42 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 28, scope: !21, entity: !8)
+!43 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 29, scope: !21, entity: !14)
+!44 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 30, scope: !21, entity: !31)
+!45 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 31, scope: !21, entity: !46)
!46 = !DIDerivedType(tag: DW_TAG_typedef, name: "baz", line: 7, file: !5, scope: !6, baseType: !8)
-!47 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 32, name: "X", scope: !21, entity: !7)
-!48 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 33, name: "Y", scope: !21, entity: !47)
-!49 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 34, scope: !21, entity: !50)
+!47 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 32, name: "X", scope: !21, entity: !7)
+!48 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 33, name: "Y", scope: !21, entity: !47)
+!49 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 34, scope: !21, entity: !50)
!50 = !DIGlobalVariable(name: "var_decl", linkageName: "_ZN1A1B8var_declE", line: 8, isLocal: false, isDefinition: false, scope: !6, file: !18, type: !13)
-!51 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 35, scope: !21, entity: !52)
+!51 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 35, scope: !21, entity: !52)
!52 = !DISubprogram(name: "func_decl", linkageName: "_ZN1A1B9func_declEv", line: 9, isLocal: false, isDefinition: false, flags: DIFlagPrototyped, isOptimized: false, file: !5, scope: !6, type: !19, variables: !53)
!53 = !{} ; previously: invalid DW_TAG_base_type
-!54 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 36, scope: !21, entity: !32)
-!55 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 37, scope: !21, entity: !26)
-!56 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 42, scope: !7, entity: !31)
+!54 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 36, scope: !21, entity: !32)
+!55 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 37, scope: !21, entity: !26)
+!56 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !5, line: 42, scope: !7, entity: !31)
!57 = !{i32 2, !"Dwarf Version", i32 2}
!58 = !{i32 2, !"Debug Info Version", i32 3}
!59 = !{!"clang version 3.6.0 "}
diff --git a/test/DebugInfo/PDB/pdbdump-headers.test b/test/DebugInfo/PDB/pdbdump-headers.test
index 1887af2e82683..14fe4bb352f63 100644
--- a/test/DebugInfo/PDB/pdbdump-headers.test
+++ b/test/DebugInfo/PDB/pdbdump-headers.test
@@ -91,189 +91,189 @@ ALL-NEXT: Mod 0001 | `* Linker *`:
ALL: Types (TPI Stream)
ALL-NEXT: ============================================================
ALL-NEXT: Showing 75 records
-ALL-NEXT: 0x1000 | LF_ARGLIST [size = 8, hash = 205956]
-ALL-NEXT: 0x1001 | LF_PROCEDURE [size = 16, hash = 163561]
+ALL-NEXT: 0x1000 | LF_ARGLIST [size = 8, hash = 0x32484]
+ALL-NEXT: 0x1001 | LF_PROCEDURE [size = 16, hash = 0x27EE9]
ALL-NEXT: return type = 0x0074 (int), # args = 0, param list = 0x1000
ALL-NEXT: calling conv = cdecl, options = None
-ALL-NEXT: 0x1002 | LF_FIELDLIST [size = 76, hash = 59811]
+ALL-NEXT: 0x1002 | LF_FIELDLIST [size = 76, hash = 0xE9A3]
ALL-NEXT: - LF_ENUMERATE [apartment = 1]
ALL-NEXT: - LF_ENUMERATE [single = 2]
ALL-NEXT: - LF_ENUMERATE [free = 3]
ALL-NEXT: - LF_ENUMERATE [neutral = 4]
ALL-NEXT: - LF_ENUMERATE [both = 5]
-ALL-NEXT: 0x1003 | LF_ENUM [size = 120, hash = 208239] `__vc_attributes::threadingAttribute::threading_e`
+ALL-NEXT: 0x1003 | LF_ENUM [size = 120, hash = 0x32D6F] `__vc_attributes::threadingAttribute::threading_e`
ALL-NEXT: unique name: `.?AW4threading_e@threadingAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x1002, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1004 | LF_STRUCTURE [size = 100, hash = 16377] `__vc_attributes::threadingAttribute`
+ALL-NEXT: 0x1004 | LF_STRUCTURE [size = 100, hash = 0x3FF9] `__vc_attributes::threadingAttribute`
ALL-NEXT: unique name: `.?AUthreadingAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x1005 | LF_POINTER [size = 12, hash = 247078]
+ALL-NEXT: 0x1005 | LF_POINTER [size = 12, hash = 0x3C526]
ALL-NEXT: referent = 0x1004, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x1006 | LF_ARGLIST [size = 12, hash = 194342]
+ALL-NEXT: 0x1006 | LF_ARGLIST [size = 12, hash = 0x2F726]
ALL-NEXT: 0x1003: `__vc_attributes::threadingAttribute::threading_e`
-ALL-NEXT: 0x1007 | LF_MFUNCTION [size = 28, hash = 254156]
+ALL-NEXT: 0x1007 | LF_MFUNCTION [size = 28, hash = 0x3E0CC]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1006
ALL-NEXT: class type = 0x1004, this type = 0x1005, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1008 | LF_MFUNCTION [size = 28, hash = 194536]
+ALL-NEXT: 0x1008 | LF_MFUNCTION [size = 28, hash = 0x2F7E8]
ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x1004, this type = 0x1005, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1009 | LF_METHODLIST [size = 20, hash = 167492]
+ALL-NEXT: 0x1009 | LF_METHODLIST [size = 20, hash = 0x28E44]
ALL-NEXT: - Method [type = 0x1007, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x1008, vftable offset = -1, attrs = public]
-ALL-NEXT: 0x100A | LF_FIELDLIST [size = 68, hash = 185421]
+ALL-NEXT: 0x100A | LF_FIELDLIST [size = 68, hash = 0x2D44D]
ALL-NEXT: - LF_NESTTYPE [name = `threading_e`, parent = 0x1003]
ALL-NEXT: - LF_METHOD [name = `threadingAttribute`, # overloads = 2, overload list = 0x1009]
ALL-NEXT: - LF_MEMBER [name = `value`, Type = 0x1003, offset = 0, attrs = public]
-ALL-NEXT: 0x100B | LF_STRUCTURE [size = 100, hash = 119540] `__vc_attributes::threadingAttribute`
+ALL-NEXT: 0x100B | LF_STRUCTURE [size = 100, hash = 0x1D2F4] `__vc_attributes::threadingAttribute`
ALL-NEXT: unique name: `.?AUthreadingAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x100A
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x100C | LF_FIELDLIST [size = 48, hash = 261871]
+ALL-NEXT: 0x100C | LF_FIELDLIST [size = 48, hash = 0x3FEEF]
ALL-NEXT: - LF_ENUMERATE [native = 0]
ALL-NEXT: - LF_ENUMERATE [com = 1]
ALL-NEXT: - LF_ENUMERATE [managed = 2]
-ALL-NEXT: 0x100D | LF_ENUM [size = 120, hash = 198119] `__vc_attributes::event_receiverAttribute::type_e`
+ALL-NEXT: 0x100D | LF_ENUM [size = 120, hash = 0x305E7] `__vc_attributes::event_receiverAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@event_receiverAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x100C, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x100E | LF_STRUCTURE [size = 112, hash = 48056] `__vc_attributes::event_receiverAttribute`
+ALL-NEXT: 0x100E | LF_STRUCTURE [size = 112, hash = 0xBBB8] `__vc_attributes::event_receiverAttribute`
ALL-NEXT: unique name: `.?AUevent_receiverAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x100F | LF_POINTER [size = 12, hash = 251486]
+ALL-NEXT: 0x100F | LF_POINTER [size = 12, hash = 0x3D65E]
ALL-NEXT: referent = 0x100E, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x1010 | LF_ARGLIST [size = 16, hash = 134580]
+ALL-NEXT: 0x1010 | LF_ARGLIST [size = 16, hash = 0x20DB4]
ALL-NEXT: 0x100D: `__vc_attributes::event_receiverAttribute::type_e`
ALL-NEXT: 0x0030 (bool): `bool`
-ALL-NEXT: 0x1011 | LF_MFUNCTION [size = 28, hash = 148190]
+ALL-NEXT: 0x1011 | LF_MFUNCTION [size = 28, hash = 0x242DE]
ALL-NEXT: return type = 0x0003 (void), # args = 2, param list = 0x1010
ALL-NEXT: class type = 0x100E, this type = 0x100F, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1012 | LF_ARGLIST [size = 12, hash = 113636]
+ALL-NEXT: 0x1012 | LF_ARGLIST [size = 12, hash = 0x1BBE4]
ALL-NEXT: 0x100D: `__vc_attributes::event_receiverAttribute::type_e`
-ALL-NEXT: 0x1013 | LF_MFUNCTION [size = 28, hash = 53336]
+ALL-NEXT: 0x1013 | LF_MFUNCTION [size = 28, hash = 0xD058]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1012
ALL-NEXT: class type = 0x100E, this type = 0x100F, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1014 | LF_MFUNCTION [size = 28, hash = 55779]
+ALL-NEXT: 0x1014 | LF_MFUNCTION [size = 28, hash = 0xD9E3]
ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x100E, this type = 0x100F, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1015 | LF_METHODLIST [size = 28, hash = 220695]
+ALL-NEXT: 0x1015 | LF_METHODLIST [size = 28, hash = 0x35E17]
ALL-NEXT: - Method [type = 0x1011, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x1013, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x1014, vftable offset = -1, attrs = public]
-ALL-NEXT: 0x1016 | LF_FIELDLIST [size = 96, hash = 198114]
+ALL-NEXT: 0x1016 | LF_FIELDLIST [size = 96, hash = 0x305E2]
ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x100D]
ALL-NEXT: - LF_METHOD [name = `event_receiverAttribute`, # overloads = 3, overload list = 0x1015]
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x100D, offset = 0, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `layout_dependent`, Type = 0x0030 (bool), offset = 4, attrs = public]
-ALL-NEXT: 0x1017 | LF_STRUCTURE [size = 112, hash = 148734] `__vc_attributes::event_receiverAttribute`
+ALL-NEXT: 0x1017 | LF_STRUCTURE [size = 112, hash = 0x244FE] `__vc_attributes::event_receiverAttribute`
ALL-NEXT: unique name: `.?AUevent_receiverAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1016
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x1018 | LF_FIELDLIST [size = 48, hash = 81128]
+ALL-NEXT: 0x1018 | LF_FIELDLIST [size = 48, hash = 0x13CE8]
ALL-NEXT: - LF_ENUMERATE [never = 0]
ALL-NEXT: - LF_ENUMERATE [allowed = 1]
ALL-NEXT: - LF_ENUMERATE [always = 2]
-ALL-NEXT: 0x1019 | LF_ENUM [size = 116, hash = 60158] `__vc_attributes::aggregatableAttribute::type_e`
+ALL-NEXT: 0x1019 | LF_ENUM [size = 116, hash = 0xEAFE] `__vc_attributes::aggregatableAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@aggregatableAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x1018, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x101A | LF_STRUCTURE [size = 108, hash = 217249] `__vc_attributes::aggregatableAttribute`
+ALL-NEXT: 0x101A | LF_STRUCTURE [size = 108, hash = 0x350A1] `__vc_attributes::aggregatableAttribute`
ALL-NEXT: unique name: `.?AUaggregatableAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x101B | LF_POINTER [size = 12, hash = 174209]
+ALL-NEXT: 0x101B | LF_POINTER [size = 12, hash = 0x2A881]
ALL-NEXT: referent = 0x101A, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x101C | LF_ARGLIST [size = 12, hash = 159978]
+ALL-NEXT: 0x101C | LF_ARGLIST [size = 12, hash = 0x270EA]
ALL-NEXT: 0x1019: `__vc_attributes::aggregatableAttribute::type_e`
-ALL-NEXT: 0x101D | LF_MFUNCTION [size = 28, hash = 249504]
+ALL-NEXT: 0x101D | LF_MFUNCTION [size = 28, hash = 0x3CEA0]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x101C
ALL-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x101E | LF_MFUNCTION [size = 28, hash = 141941]
+ALL-NEXT: 0x101E | LF_MFUNCTION [size = 28, hash = 0x22A75]
ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x101A, this type = 0x101B, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x101F | LF_METHODLIST [size = 20, hash = 238785]
+ALL-NEXT: 0x101F | LF_METHODLIST [size = 20, hash = 0x3A4C1]
ALL-NEXT: - Method [type = 0x101D, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x101E, vftable offset = -1, attrs = public]
-ALL-NEXT: 0x1020 | LF_FIELDLIST [size = 68, hash = 6214]
+ALL-NEXT: 0x1020 | LF_FIELDLIST [size = 68, hash = 0x1846]
ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x1019]
ALL-NEXT: - LF_METHOD [name = `aggregatableAttribute`, # overloads = 2, overload list = 0x101F]
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x1019, offset = 0, attrs = public]
-ALL-NEXT: 0x1021 | LF_STRUCTURE [size = 108, hash = 94935] `__vc_attributes::aggregatableAttribute`
+ALL-NEXT: 0x1021 | LF_STRUCTURE [size = 108, hash = 0x172D7] `__vc_attributes::aggregatableAttribute`
ALL-NEXT: unique name: `.?AUaggregatableAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1020
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x1022 | LF_ENUM [size = 116, hash = 151449] `__vc_attributes::event_sourceAttribute::type_e`
+ALL-NEXT: 0x1022 | LF_ENUM [size = 116, hash = 0x24F99] `__vc_attributes::event_sourceAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@event_sourceAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x100C, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1023 | LF_FIELDLIST [size = 28, hash = 135589]
+ALL-NEXT: 0x1023 | LF_FIELDLIST [size = 28, hash = 0x211A5]
ALL-NEXT: - LF_ENUMERATE [speed = 0]
ALL-NEXT: - LF_ENUMERATE [size = 1]
-ALL-NEXT: 0x1024 | LF_ENUM [size = 124, hash = 73373] `__vc_attributes::event_sourceAttribute::optimize_e`
+ALL-NEXT: 0x1024 | LF_ENUM [size = 124, hash = 0x11E9D] `__vc_attributes::event_sourceAttribute::optimize_e`
ALL-NEXT: unique name: `.?AW4optimize_e@event_sourceAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x1023, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1025 | LF_STRUCTURE [size = 108, hash = 96512] `__vc_attributes::event_sourceAttribute`
+ALL-NEXT: 0x1025 | LF_STRUCTURE [size = 108, hash = 0x17900] `__vc_attributes::event_sourceAttribute`
ALL-NEXT: unique name: `.?AUevent_sourceAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x1026 | LF_POINTER [size = 12, hash = 254299]
+ALL-NEXT: 0x1026 | LF_POINTER [size = 12, hash = 0x3E15B]
ALL-NEXT: referent = 0x1025, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x1027 | LF_ARGLIST [size = 12, hash = 17744]
+ALL-NEXT: 0x1027 | LF_ARGLIST [size = 12, hash = 0x4550]
ALL-NEXT: 0x1022: `__vc_attributes::event_sourceAttribute::type_e`
-ALL-NEXT: 0x1028 | LF_MFUNCTION [size = 28, hash = 239514]
+ALL-NEXT: 0x1028 | LF_MFUNCTION [size = 28, hash = 0x3A79A]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1027
ALL-NEXT: class type = 0x1025, this type = 0x1026, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1029 | LF_MFUNCTION [size = 28, hash = 173189]
+ALL-NEXT: 0x1029 | LF_MFUNCTION [size = 28, hash = 0x2A485]
ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x1025, this type = 0x1026, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x102A | LF_METHODLIST [size = 20, hash = 130544]
+ALL-NEXT: 0x102A | LF_METHODLIST [size = 20, hash = 0x1FDF0]
ALL-NEXT: - Method [type = 0x1028, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x1029, vftable offset = -1, attrs = public]
-ALL-NEXT: 0x102B | LF_FIELDLIST [size = 128, hash = 204437]
+ALL-NEXT: 0x102B | LF_FIELDLIST [size = 128, hash = 0x31E95]
ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x1022]
ALL-NEXT: - LF_NESTTYPE [name = `optimize_e`, parent = 0x1024]
ALL-NEXT: - LF_METHOD [name = `event_sourceAttribute`, # overloads = 2, overload list = 0x102A]
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x1022, offset = 0, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `optimize`, Type = 0x1024, offset = 4, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `decorate`, Type = 0x0030 (bool), offset = 8, attrs = public]
-ALL-NEXT: 0x102C | LF_STRUCTURE [size = 108, hash = 238560] `__vc_attributes::event_sourceAttribute`
+ALL-NEXT: 0x102C | LF_STRUCTURE [size = 108, hash = 0x3A3E0] `__vc_attributes::event_sourceAttribute`
ALL-NEXT: unique name: `.?AUevent_sourceAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x102B
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x102D | LF_FIELDLIST [size = 92, hash = 144673]
+ALL-NEXT: 0x102D | LF_FIELDLIST [size = 92, hash = 0x23521]
ALL-NEXT: - LF_ENUMERATE [dll = 1]
ALL-NEXT: - LF_ENUMERATE [exe = 2]
ALL-NEXT: - LF_ENUMERATE [service = 3]
ALL-NEXT: - LF_ENUMERATE [unspecified = 4]
ALL-NEXT: - LF_ENUMERATE [EXE = 2]
ALL-NEXT: - LF_ENUMERATE [SERVICE = 3]
-ALL-NEXT: 0x102E | LF_ENUM [size = 104, hash = 115151] `__vc_attributes::moduleAttribute::type_e`
+ALL-NEXT: 0x102E | LF_ENUM [size = 104, hash = 0x1C1CF] `__vc_attributes::moduleAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@moduleAttribute@__vc_attributes@@`
ALL-NEXT: field list: 0x102D, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x102F | LF_STRUCTURE [size = 96, hash = 197306] `__vc_attributes::moduleAttribute`
+ALL-NEXT: 0x102F | LF_STRUCTURE [size = 96, hash = 0x302BA] `__vc_attributes::moduleAttribute`
ALL-NEXT: unique name: `.?AUmoduleAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x1030 | LF_POINTER [size = 12, hash = 256035]
+ALL-NEXT: 0x1030 | LF_POINTER [size = 12, hash = 0x3E823]
ALL-NEXT: referent = 0x102F, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x1031 | LF_MODIFIER [size = 12, hash = 101096]
+ALL-NEXT: 0x1031 | LF_MODIFIER [size = 12, hash = 0x18AE8]
ALL-NEXT: referent = 0x0070 (char), modifiers = const
-ALL-NEXT: 0x1032 | LF_POINTER [size = 12, hash = 231280]
+ALL-NEXT: 0x1032 | LF_POINTER [size = 12, hash = 0x38770]
ALL-NEXT: referent = 0x1031, mode = pointer, opts = None, kind = ptr32
-ALL-NEXT: 0x1033 | LF_ARGLIST [size = 68, hash = 52156]
+ALL-NEXT: 0x1033 | LF_ARGLIST [size = 68, hash = 0xCBBC]
ALL-NEXT: 0x102E: `__vc_attributes::moduleAttribute::type_e`
ALL-NEXT: 0x1032: `const char*`
ALL-NEXT: 0x1032: `const char*`
@@ -289,25 +289,25 @@ ALL-NEXT: 0x0030 (bool): `bool`
ALL-NEXT: 0x0030 (bool): `bool`
ALL-NEXT: 0x1032: `const char*`
ALL-NEXT: 0x1032: `const char*`
-ALL-NEXT: 0x1034 | LF_MFUNCTION [size = 28, hash = 48854]
+ALL-NEXT: 0x1034 | LF_MFUNCTION [size = 28, hash = 0xBED6]
ALL-NEXT: return type = 0x0003 (void), # args = 15, param list = 0x1033
ALL-NEXT: class type = 0x102F, this type = 0x1030, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1035 | LF_ARGLIST [size = 12, hash = 170035]
+ALL-NEXT: 0x1035 | LF_ARGLIST [size = 12, hash = 0x29833]
ALL-NEXT: 0x102E: `__vc_attributes::moduleAttribute::type_e`
-ALL-NEXT: 0x1036 | LF_MFUNCTION [size = 28, hash = 177041]
+ALL-NEXT: 0x1036 | LF_MFUNCTION [size = 28, hash = 0x2B391]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1035
ALL-NEXT: class type = 0x102F, this type = 0x1030, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1037 | LF_MFUNCTION [size = 28, hash = 102745]
+ALL-NEXT: 0x1037 | LF_MFUNCTION [size = 28, hash = 0x19159]
ALL-NEXT: return type = 0x0003 (void), # args = 0, param list = 0x1000
ALL-NEXT: class type = 0x102F, this type = 0x1030, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1038 | LF_METHODLIST [size = 28, hash = 16947]
+ALL-NEXT: 0x1038 | LF_METHODLIST [size = 28, hash = 0x4233]
ALL-NEXT: - Method [type = 0x1034, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x1036, vftable offset = -1, attrs = public]
ALL-NEXT: - Method [type = 0x1037, vftable offset = -1, attrs = public]
-ALL-NEXT: 0x1039 | LF_FIELDLIST [size = 356, hash = 183703]
+ALL-NEXT: 0x1039 | LF_FIELDLIST [size = 356, hash = 0x2CD97]
ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x102E]
ALL-NEXT: - LF_METHOD [name = `moduleAttribute`, # overloads = 3, overload list = 0x1038]
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x102E, offset = 0, attrs = public]
@@ -325,11 +325,11 @@ ALL-NEXT: - LF_MEMBER [name = `hidden`, Type = 0x0030 (bool), offset
ALL-NEXT: - LF_MEMBER [name = `restricted`, Type = 0x0030 (bool), offset = 45, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `custom`, Type = 0x1032, offset = 48, attrs = public]
ALL-NEXT: - LF_MEMBER [name = `resource_name`, Type = 0x1032, offset = 52, attrs = public]
-ALL-NEXT: 0x103A | LF_STRUCTURE [size = 96, hash = 98548] `__vc_attributes::moduleAttribute`
+ALL-NEXT: 0x103A | LF_STRUCTURE [size = 96, hash = 0x180F4] `__vc_attributes::moduleAttribute`
ALL-NEXT: unique name: `.?AUmoduleAttribute@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1039
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x103B | LF_FIELDLIST [size = 756, hash = 35693]
+ALL-NEXT: 0x103B | LF_FIELDLIST [size = 756, hash = 0x8B6D]
ALL-NEXT: - LF_ENUMERATE [eAnyUsage = 0]
ALL-NEXT: - LF_ENUMERATE [eCoClassUsage = 1]
ALL-NEXT: - LF_ENUMERATE [eCOMInterfaceUsage = 2]
@@ -360,58 +360,58 @@ ALL-NEXT: - LF_ENUMERATE [eModuleUsage = 16777216]
ALL-NEXT: - LF_ENUMERATE [eIllegalUsage = 33554432]
ALL-NEXT: - LF_ENUMERATE [eAsynchronousUsage = 67108864]
ALL-NEXT: - LF_ENUMERATE [eAnyIDLUsage = 4161535]
-ALL-NEXT: 0x103C | LF_ENUM [size = 140, hash = 171328] `__vc_attributes::helper_attributes::usageAttribute::usage_e`
+ALL-NEXT: 0x103C | LF_ENUM [size = 140, hash = 0x29D40] `__vc_attributes::helper_attributes::usageAttribute::usage_e`
ALL-NEXT: unique name: `.?AW4usage_e@usageAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: field list: 0x103B, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x103D | LF_STRUCTURE [size = 128, hash = 203640] `__vc_attributes::helper_attributes::usageAttribute`
+ALL-NEXT: 0x103D | LF_STRUCTURE [size = 128, hash = 0x31B78] `__vc_attributes::helper_attributes::usageAttribute`
ALL-NEXT: unique name: `.?AUusageAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x103E | LF_POINTER [size = 12, hash = 139292]
+ALL-NEXT: 0x103E | LF_POINTER [size = 12, hash = 0x2201C]
ALL-NEXT: referent = 0x103D, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x103F | LF_ARGLIST [size = 12, hash = 49018]
+ALL-NEXT: 0x103F | LF_ARGLIST [size = 12, hash = 0xBF7A]
ALL-NEXT: 0x0075 (unsigned): `unsigned`
-ALL-NEXT: 0x1040 | LF_MFUNCTION [size = 28, hash = 43821]
+ALL-NEXT: 0x1040 | LF_MFUNCTION [size = 28, hash = 0xAB2D]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x103F
ALL-NEXT: class type = 0x103D, this type = 0x103E, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1041 | LF_FIELDLIST [size = 60, hash = 202555]
+ALL-NEXT: 0x1041 | LF_FIELDLIST [size = 60, hash = 0x3173B]
ALL-NEXT: - LF_NESTTYPE [name = `usage_e`, parent = 0x103C]
ALL-NEXT: - LF_ONEMETHOD [name = `usageAttribute`]
ALL-NEXT: type = 0x1040, vftable offset = -1, attrs = public
ALL-NEXT: - LF_MEMBER [name = `value`, Type = 0x0075 (unsigned), offset = 0, attrs = public]
-ALL-NEXT: 0x1042 | LF_STRUCTURE [size = 128, hash = 165040] `__vc_attributes::helper_attributes::usageAttribute`
+ALL-NEXT: 0x1042 | LF_STRUCTURE [size = 128, hash = 0x284B0] `__vc_attributes::helper_attributes::usageAttribute`
ALL-NEXT: unique name: `.?AUusageAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1041
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
-ALL-NEXT: 0x1043 | LF_FIELDLIST [size = 68, hash = 215835]
+ALL-NEXT: 0x1043 | LF_FIELDLIST [size = 68, hash = 0x34B1B]
ALL-NEXT: - LF_ENUMERATE [eBoolean = 0]
ALL-NEXT: - LF_ENUMERATE [eInteger = 1]
ALL-NEXT: - LF_ENUMERATE [eFloat = 2]
ALL-NEXT: - LF_ENUMERATE [eDouble = 3]
-ALL-NEXT: 0x1044 | LF_ENUM [size = 148, hash = 142625] `__vc_attributes::helper_attributes::v1_alttypeAttribute::type_e`
+ALL-NEXT: 0x1044 | LF_ENUM [size = 148, hash = 0x22D21] `__vc_attributes::helper_attributes::v1_alttypeAttribute::type_e`
ALL-NEXT: unique name: `.?AW4type_e@v1_alttypeAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: field list: 0x1043, underlying type: 0x0074 (int)
ALL-NEXT: options: has unique name | is nested
-ALL-NEXT: 0x1045 | LF_STRUCTURE [size = 140, hash = 52534] `__vc_attributes::helper_attributes::v1_alttypeAttribute`
+ALL-NEXT: 0x1045 | LF_STRUCTURE [size = 140, hash = 0xCD36] `__vc_attributes::helper_attributes::v1_alttypeAttribute`
ALL-NEXT: unique name: `.?AUv1_alttypeAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: <no type>
ALL-NEXT: options: forward ref | has unique name
-ALL-NEXT: 0x1046 | LF_POINTER [size = 12, hash = 44186]
+ALL-NEXT: 0x1046 | LF_POINTER [size = 12, hash = 0xAC9A]
ALL-NEXT: referent = 0x1045, mode = pointer, opts = const, kind = ptr32
-ALL-NEXT: 0x1047 | LF_ARGLIST [size = 12, hash = 103930]
+ALL-NEXT: 0x1047 | LF_ARGLIST [size = 12, hash = 0x195FA]
ALL-NEXT: 0x1044: `__vc_attributes::helper_attributes::v1_alttypeAttribute::type_e`
-ALL-NEXT: 0x1048 | LF_MFUNCTION [size = 28, hash = 110942]
+ALL-NEXT: 0x1048 | LF_MFUNCTION [size = 28, hash = 0x1B15E]
ALL-NEXT: return type = 0x0003 (void), # args = 1, param list = 0x1047
ALL-NEXT: class type = 0x1045, this type = 0x1046, this adjust = 0
ALL-NEXT: calling conv = thiscall, options = constructor
-ALL-NEXT: 0x1049 | LF_FIELDLIST [size = 64, hash = 17991]
+ALL-NEXT: 0x1049 | LF_FIELDLIST [size = 64, hash = 0x4647]
ALL-NEXT: - LF_NESTTYPE [name = `type_e`, parent = 0x1044]
ALL-NEXT: - LF_ONEMETHOD [name = `v1_alttypeAttribute`]
ALL-NEXT: type = 0x1048, vftable offset = -1, attrs = public
ALL-NEXT: - LF_MEMBER [name = `type`, Type = 0x1044, offset = 0, attrs = public]
-ALL-NEXT: 0x104A | LF_STRUCTURE [size = 140, hash = 213215] `__vc_attributes::helper_attributes::v1_alttypeAttribute`
+ALL-NEXT: 0x104A | LF_STRUCTURE [size = 140, hash = 0x340DF] `__vc_attributes::helper_attributes::v1_alttypeAttribute`
ALL-NEXT: unique name: `.?AUv1_alttypeAttribute@helper_attributes@__vc_attributes@@`
ALL-NEXT: vtable: <no type>, base list: <no type>, field list: 0x1049
ALL-NEXT: options: has ctor / dtor | contains nested class | has unique name
@@ -421,29 +421,29 @@ ALL: Hash Adjusters:
ALL: Types (IPI Stream)
ALL-NEXT: ============================================================
ALL-NEXT: Showing 15 records
-ALL-NEXT: 0x1000 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7186]
+ALL-NEXT: 0x1000 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C12]
ALL-NEXT: udt = 0x100B, mod = 1, file = 1, line = 481
-ALL-NEXT: 0x1001 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7198]
+ALL-NEXT: 0x1001 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C1E]
ALL-NEXT: udt = 0x1017, mod = 1, file = 1, line = 194
-ALL-NEXT: 0x1002 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7180]
+ALL-NEXT: 0x1002 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C0C]
ALL-NEXT: udt = 0x1021, mod = 1, file = 1, line = 603
-ALL-NEXT: 0x1003 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7191]
+ALL-NEXT: 0x1003 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C17]
ALL-NEXT: udt = 0x102C, mod = 1, file = 1, line = 1200
-ALL-NEXT: 0x1004 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7201]
+ALL-NEXT: 0x1004 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C21]
ALL-NEXT: udt = 0x103A, mod = 1, file = 1, line = 540
-ALL-NEXT: 0x1005 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7241]
+ALL-NEXT: 0x1005 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C49]
ALL-NEXT: udt = 0x1042, mod = 1, file = 1, line = 108
-ALL-NEXT: 0x1006 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 7249]
+ALL-NEXT: 0x1006 | LF_UDT_MOD_SRC_LINE [size = 20, hash = 0x1C51]
ALL-NEXT: udt = 0x104A, mod = 1, file = 1, line = 96
-ALL-NEXT: 0x1007 | LF_STRING_ID [size = 48, hash = 80727] ID: <no type>, String: d:\src\llvm\test\DebugInfo\PDB\Inputs
-ALL-NEXT: 0x1008 | LF_STRING_ID [size = 76, hash = 154177] ID: <no type>, String: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe
-ALL-NEXT: 0x1009 | LF_STRING_ID [size = 20, hash = 75189] ID: <no type>, String: empty.cpp
-ALL-NEXT: 0x100A | LF_STRING_ID [size = 56, hash = 253662] ID: <no type>, String: d:\src\llvm\test\DebugInfo\PDB\Inputs\vc120.pdb
-ALL-NEXT: 0x100B | LF_STRING_ID [size = 252, hash = 193467] ID: <no type>, String: -Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows
-ALL-NEXT: 0x100C | LF_SUBSTR_LIST [size = 12, hash = 222705]
+ALL-NEXT: 0x1007 | LF_STRING_ID [size = 48, hash = 0x13B57] ID: <no type>, String: d:\src\llvm\test\DebugInfo\PDB\Inputs
+ALL-NEXT: 0x1008 | LF_STRING_ID [size = 76, hash = 0x25A41] ID: <no type>, String: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe
+ALL-NEXT: 0x1009 | LF_STRING_ID [size = 20, hash = 0x125B5] ID: <no type>, String: empty.cpp
+ALL-NEXT: 0x100A | LF_STRING_ID [size = 56, hash = 0x3DEDE] ID: <no type>, String: d:\src\llvm\test\DebugInfo\PDB\Inputs\vc120.pdb
+ALL-NEXT: 0x100B | LF_STRING_ID [size = 252, hash = 0x2F3BB] ID: <no type>, String: -Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows
+ALL-NEXT: 0x100C | LF_SUBSTR_LIST [size = 12, hash = 0x365F1]
ALL-NEXT: 0x100B: `-Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows`
-ALL-NEXT: 0x100D | LF_STRING_ID [size = 96, hash = 186099] ID: 0x100C, String: Kits\8.1\include\um" -I"C:\Program Files (x86)\Windows Kits\8.1\include\winrt" -TP -X
-ALL-NEXT: 0x100E | LF_BUILDINFO [size = 28, hash = 257108]
+ALL-NEXT: 0x100D | LF_STRING_ID [size = 96, hash = 0x2D6F3] ID: 0x100C, String: Kits\8.1\include\um" -I"C:\Program Files (x86)\Windows Kits\8.1\include\winrt" -TP -X
+ALL-NEXT: 0x100E | LF_BUILDINFO [size = 28, hash = 0x3EC54]
ALL-NEXT: 0x1007: `d:\src\llvm\test\DebugInfo\PDB\Inputs`
ALL-NEXT: 0x1008: `C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe`
ALL-NEXT: 0x1009: `empty.cpp`
@@ -472,13 +472,13 @@ ALL-NEXT: frontend = 18.0.31101.0, backend = 18.0.31101.0
ALL-NEXT: flags = security checks
ALL-NEXT: 120 | S_GPROC32 [size = 44] `main`
ALL-NEXT: parent = 0, end = 196, addr = 0001:0016, code size = 10
-ALL-NEXT: debug start = 3, debug end = 8, flags = has fp
+ALL-NEXT: type = `0x1001 (int ())`, debug start = 3, debug end = 8, flags = has fp
ALL-NEXT: 164 | S_FRAMEPROC [size = 32]
ALL-NEXT: size = 0, padding size = 0, offset to padding = 0
ALL-NEXT: bytes of callee saved registers = 0, exception handler addr = 0000:0000
ALL-NEXT: flags = has async eh | opt speed
ALL-NEXT: 196 | S_END [size = 4]
-ALL-NEXT: 200 | S_BUILDINFO [size = 8] BuildId = `4110`
+ALL-NEXT: 200 | S_BUILDINFO [size = 8] BuildId = `0x100E`
ALL-NEXT: Mod 0001 | `* Linker *`:
ALL-NEXT: 4 | S_OBJNAME [size = 20] sig=0, `* Linker *`
ALL-NEXT: 24 | S_COMPILE3 [size = 48]
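Two things change in the dump format above: record hashes print in hexadecimal rather than decimal, and symbols that reference type records now print a resolvable type index. The S_BUILDINFO id, for instance, can be matched directly against the LF_BUILDINFO record in the IPI stream shown earlier:

    200 | S_BUILDINFO [size = 8] BuildId = `0x100E`
    0x100E | LF_BUILDINFO [size = 28, hash = 0x3EC54]

(0x100E is decimal 4110, the value the old output printed.)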
diff --git a/test/DebugInfo/X86/DIModule.ll b/test/DebugInfo/X86/DIModule.ll
index 1fe7f0c5fabe4..eacdfe10f53bf 100644
--- a/test/DebugInfo/X86/DIModule.ll
+++ b/test/DebugInfo/X86/DIModule.ll
@@ -18,7 +18,7 @@ target triple = "x86_64-apple-macosx"
!1 = !DIFile(filename: "/llvm/tools/clang/test/Modules/<stdin>", directory: "/")
!2 = !{}
!3 = !{!4}
-!4 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !0, entity: !5, line: 5)
+!4 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !0, entity: !5, file: !1, line: 5)
!5 = !DIModule(scope: null, name: "DebugModule", configMacros: "-DMODULES=0", includePath: "/llvm/tools/clang/test/Modules/Inputs", isysroot: "/")
!6 = !{i32 2, !"Dwarf Version", i32 4}
!7 = !{i32 2, !"Debug Info Version", i32 3}
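The DebugInfo test updates that follow all apply one schema change: !DIImportedEntity gains a file: operand naming the file its line: number belongs to. The pattern, mirroring the hunk above:

    !1 = !DIFile(filename: "/llvm/tools/clang/test/Modules/<stdin>", directory: "/")
    ; before: a bare line number
    !4 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !0, entity: !5, line: 5)
    ; after: the import carries the file its line number refers to
    !4 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !0, entity: !5, file: !1, line: 5)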
diff --git a/test/DebugInfo/X86/DIModuleContext.ll b/test/DebugInfo/X86/DIModuleContext.ll
index a63fd0f373cd3..02d4441c82343 100644
--- a/test/DebugInfo/X86/DIModuleContext.ll
+++ b/test/DebugInfo/X86/DIModuleContext.ll
@@ -24,7 +24,7 @@ target triple = "x86_64-apple-macosx"
!4 = !{}
!5 = !{!0}
!6 = !{!7}
-!7 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !2, entity: !8, line: 11)
+!7 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !2, entity: !8, file: !3, line: 11)
!8 = !DIModule(scope: null, name: "Module", includePath: ".", isysroot: "/")
!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64, align: 64)
!10 = !DICompositeType(tag: DW_TAG_structure_type, name: "s", scope: !8, file: !3, line: 1, flags: DIFlagFwdDecl)
diff --git a/test/DebugInfo/X86/fission-inline.ll b/test/DebugInfo/X86/fission-inline.ll
index 45e0127294d1f..eadcd15b2f215 100644
--- a/test/DebugInfo/X86/fission-inline.ll
+++ b/test/DebugInfo/X86/fission-inline.ll
@@ -110,7 +110,7 @@ attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!16 = !DIBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
!17 = !DISubprogram(name: "f2<int>", linkageName: "_ZN3foo2f2IiEEvv", line: 10, isLocal: false, isDefinition: false, flags: DIFlagPrototyped, isOptimized: false, scopeLine: 10, file: !1, scope: !4, type: !12, templateParams: !14)
!18 = !{!19}
-!19 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 19, scope: !20, entity: !4)
+!19 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !1, line: 19, scope: !20, entity: !4)
!20 = distinct !DILexicalBlock(line: 16, column: 13, file: !1, scope: !21)
!21 = distinct !DILexicalBlock(line: 16, column: 7, file: !1, scope: !10)
!22 = !{i32 2, !"Dwarf Version", i32 4}
diff --git a/test/DebugInfo/X86/gnu-public-names.ll b/test/DebugInfo/X86/gnu-public-names.ll
index 533ab838a732d..df1d113538ea1 100644
--- a/test/DebugInfo/X86/gnu-public-names.ll
+++ b/test/DebugInfo/X86/gnu-public-names.ll
@@ -345,9 +345,9 @@ attributes #1 = { nounwind readnone }
!42 = !DINamespace(scope: !43)
!43 = !DINamespace(name: "outer", scope: null)
!44 = !{!45, !47}
-!45 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !2, entity: !46, line: 34)
+!45 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !2, entity: !46, file: !3, line: 34)
!46 = !DIGlobalVariable(name: "global_namespace_variable_decl", linkageName: "_ZN2ns30global_namespace_variable_declE", scope: !18, file: !3, line: 28, type: !9, isLocal: false, isDefinition: false)
-!47 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !43, entity: !42, line: 43)
+!47 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !43, entity: !42, file: !3, line: 43)
!48 = !{i32 2, !"Dwarf Version", i32 4}
!49 = !{i32 2, !"Debug Info Version", i32 3}
!50 = !{!"clang version 3.7.0 (trunk 234897) (llvm/trunk 234911)"}
diff --git a/test/DebugInfo/X86/lexical-block-file-inline.ll b/test/DebugInfo/X86/lexical-block-file-inline.ll
index 0f85a5573f032..9f040f41ec5ed 100644
--- a/test/DebugInfo/X86/lexical-block-file-inline.ll
+++ b/test/DebugInfo/X86/lexical-block-file-inline.ll
@@ -134,7 +134,7 @@ attributes #2 = { nounwind }
!8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !9, file: !9, line: 6, type: !5, isLocal: false, isDefinition: true, scopeLine: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
!9 = !DIFile(filename: "test.h", directory: "/")
!10 = !{!11}
-!11 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !12, entity: !14, line: 1)
+!11 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !12, entity: !14, file: !1, line: 1)
!12 = !DILexicalBlockFile(scope: !13, file: !9, discriminator: 0)
!13 = distinct !DILexicalBlock(scope: !4, file: !1, line: 3)
!14 = !DINamespace(name: "N", scope: null)
diff --git a/test/DebugInfo/X86/pr19307.ll b/test/DebugInfo/X86/pr19307.ll
index a8278c9dcf834..c94572f7d23ca 100644
--- a/test/DebugInfo/X86/pr19307.ll
+++ b/test/DebugInfo/X86/pr19307.ll
@@ -105,26 +105,26 @@ attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!19 = !DIDerivedType(tag: DW_TAG_typedef, name: "string", line: 65, file: !20, scope: !10, baseType: !8)
!20 = !DIFile(filename: "/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/bits/stringfwd.h", directory: "/llvm_cmake_gcc")
!21 = !{!22, !26, !29, !33, !38, !41}
-!22 = !DIImportedEntity(tag: DW_TAG_imported_module, line: 57, scope: !23, entity: !25)
+!22 = !DIImportedEntity(tag: DW_TAG_imported_module, file: !1, line: 57, scope: !23, entity: !25)
!23 = !DINamespace(name: "__gnu_debug", scope: null)
!24 = !DIFile(filename: "/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/debug/debug.h", directory: "/llvm_cmake_gcc")
!25 = !DINamespace(name: "__debug", scope: !10)
-!26 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 66, scope: !10, entity: !27)
+!26 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !1, line: 66, scope: !10, entity: !27)
!27 = !DIDerivedType(tag: DW_TAG_typedef, name: "mbstate_t", line: 106, file: !5, baseType: !28)
!28 = !DIDerivedType(tag: DW_TAG_typedef, name: "__mbstate_t", line: 95, file: !5, baseType: !4)
-!29 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 141, scope: !10, entity: !30)
+!29 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !1, line: 141, scope: !10, entity: !30)
!30 = !DIDerivedType(tag: DW_TAG_typedef, name: "wint_t", line: 141, file: !31, baseType: !32)
!31 = !DIFile(filename: "/llvm_cmake_gcc/bin/../lib/clang/3.5.0/include/stddef.h", directory: "/llvm_cmake_gcc")
!32 = !DIBasicType(tag: DW_TAG_base_type, name: "unsigned int", size: 32, align: 32, encoding: DW_ATE_unsigned)
-!33 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 42, scope: !34, entity: !36)
+!33 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !1, line: 42, scope: !34, entity: !36)
!34 = !DINamespace(name: "__gnu_cxx", scope: null)
!35 = !DIFile(filename: "/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/bits/cpp_type_traits.h", directory: "/llvm_cmake_gcc")
!36 = !DIDerivedType(tag: DW_TAG_typedef, name: "size_t", line: 155, file: !11, scope: !10, baseType: !37)
!37 = !DIBasicType(tag: DW_TAG_base_type, name: "long unsigned int", size: 64, align: 64, encoding: DW_ATE_unsigned)
-!38 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 43, scope: !34, entity: !39)
+!38 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !1, line: 43, scope: !34, entity: !39)
!39 = !DIDerivedType(tag: DW_TAG_typedef, name: "ptrdiff_t", line: 156, file: !11, scope: !10, baseType: !40)
!40 = !DIBasicType(tag: DW_TAG_base_type, name: "long int", size: 64, align: 64, encoding: DW_ATE_signed)
-!41 = !DIImportedEntity(tag: DW_TAG_imported_declaration, line: 55, scope: !10, entity: !6)
+!41 = !DIImportedEntity(tag: DW_TAG_imported_declaration, file: !1, line: 55, scope: !10, entity: !6)
!42 = !{i32 2, !"Dwarf Version", i32 4}
!43 = !{i32 2, !"Debug Info Version", i32 3}
!44 = !{!"clang version 3.5.0 (209308)"}
diff --git a/test/DllTool/coff-exports.def b/test/DllTool/coff-exports.def
new file mode 100644
index 0000000000000..0226886a523cd
--- /dev/null
+++ b/test/DllTool/coff-exports.def
@@ -0,0 +1,13 @@
+; RUN: llvm-dlltool -m i386:x86-64 --input-def %s --output-lib %t.a
+; RUN: llvm-readobj -coff-exports %t.a | FileCheck %s
+
+LIBRARY test.dll
+EXPORTS
+TestFunction
+
+; CHECK: File: test.dll
+; CHECK: Format: COFF-import-file
+; CHECK: Type: code
+; CHECK: Name type: name
+; CHECK: Symbol: __imp_TestFunction
+; CHECK: Symbol: TestFunction
diff --git a/test/DllTool/coff-weak-exports.def b/test/DllTool/coff-weak-exports.def
new file mode 100644
index 0000000000000..511d947d83959
--- /dev/null
+++ b/test/DllTool/coff-weak-exports.def
@@ -0,0 +1,19 @@
+; RUN: llvm-dlltool -m i386:x86-64 --input-def %s --output-lib %t.a
+; RUN: llvm-readobj -coff-exports %t.a | FileCheck %s
+
+LIBRARY test.dll
+EXPORTS
+TestFunction==AltTestFunction
+
+; CHECK: File: test.dll
+; CHECK: Format: COFF-x86-64
+; CHECK: Arch: x86_64
+; CHECK: AddressSize: 64bit
+; CHECK: File: test.dll
+; CHECK: Format: COFF-x86-64
+; CHECK: Arch: x86_64
+; CHECK: AddressSize: 64bit
+; CHECK: File: test.dll
+; CHECK: Format: COFF-x86-64
+; CHECK: Arch: x86_64
+; CHECK: AddressSize: 64bit
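In a module-definition EXPORTS section, `name==other` exports `name` as an alias that resolves to `other` (a weak external in the import library), hence llvm-readobj reporting regular COFF-x86-64 members here rather than the COFF-import-file entries seen in coff-exports.def. A minimal sketch of the syntax, with hypothetical names:

    LIBRARY example.dll
    EXPORTS
    PublicName==InternalImplementation   ; PublicName resolves to InternalImplementation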
diff --git a/test/DllTool/lit.local.cfg b/test/DllTool/lit.local.cfg
new file mode 100644
index 0000000000000..482608486d219
--- /dev/null
+++ b/test/DllTool/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.def']
diff --git a/test/FileCheck/regex-scope.txt b/test/FileCheck/regex-scope.txt
index e77f3f6513a87..989f422c6bcd3 100644
--- a/test/FileCheck/regex-scope.txt
+++ b/test/FileCheck/regex-scope.txt
@@ -1,4 +1,4 @@
-// RUN: FileCheck -check-prefix CHECK -input-file %s %s
+// RUN: FileCheck -input-file %s %s
// RUN: FileCheck -check-prefixes CHECK,GLOBAL -input-file %s %s
// RUN: FileCheck -check-prefixes CHECK,LOCAL -input-file %s %s
// RUN: FileCheck -check-prefixes CHECK,GLOBAL --enable-var-scope -input-file %s %s
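The first RUN line drops `-check-prefix CHECK`, which is the default prefix and therefore redundant. The remaining lines exercise `--enable-var-scope`, under which FileCheck pattern variables without a `$` prefix are local and are cleared at each CHECK-LABEL, while `$`-prefixed variables stay global. A minimal sketch with hypothetical patterns:

    // CHECK: [[LOCAL:loc[0-9]+]]     (cleared at the next CHECK-LABEL)
    // CHECK: [[$GLOBAL:glb[0-9]+]]   (persists across CHECK-LABEL blocks)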
diff --git a/test/Instrumentation/AddressSanitizer/basic.ll b/test/Instrumentation/AddressSanitizer/basic.ll
index 9827e7a6792b8..5eb5388c87a82 100644
--- a/test/Instrumentation/AddressSanitizer/basic.ll
+++ b/test/Instrumentation/AddressSanitizer/basic.ll
@@ -170,6 +170,26 @@ define void @memintr_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
; CHECK: __asan_memcpy
; CHECK: ret void
+declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+
+define void @memintr_element_atomic_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
+ ; This is a canary test to make sure that these don't get lowered into calls that don't
+ ; have the element-atomic property. Eventually, asan will have to be enhanced to lower
+ ; these properly.
+ ; CHECK-LABEL: memintr_element_atomic_test
+ ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 100, i32 1)
+ ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
+ ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 100, i32 1)
+ tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
+ tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
+ ret void
+}
+
+
; CHECK-LABEL: @test_swifterror
; CHECK-NOT: __asan_report_load
; CHECK: ret void
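For context on the intrinsics pinned down above: the trailing i32 immediate is the per-element size in bytes, the i64 length must be a multiple of that element size, and each pointer's align attribute must be at least the element size; the operation is then performed as a sequence of unordered-atomic element accesses. Annotated signature (comments added for illustration):

    declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(
      i8* nocapture writeonly,  ; dest, align >= element size
      i8* nocapture readonly,   ; src, align >= element size
      i64,                      ; total length in bytes, a multiple of the element size
      i32)                      ; element size in bytes (the constant 1 in the calls above)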
diff --git a/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll b/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
new file mode 100644
index 0000000000000..32610ce3b815c
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/stack-poisoning-byval-args.ll
@@ -0,0 +1,48 @@
+; This check verifies that arguments passed by value get redzones.
+; RUN: opt < %s -asan -asan-realign-stack=32 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.A = type { [8 x i32] }
+
+declare i32 @bar(%struct.A*)
+
+; Test behavior for named argument with explicit alignment. The memcpy and
+; alloca alignments should match the explicit alignment of 64.
+define void @foo(%struct.A* byval align 64 %a) sanitize_address {
+entry:
+; CHECK-LABEL: foo
+; CHECK: call i64 @__asan_stack_malloc
+; CHECK: alloca i8, i64 {{.*}} align 64
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
+; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
+; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %a
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}}[[aBytePtr]],{{[^,]+}}, i32 64
+; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: ret void
+
+ %call = call i32 @bar(%struct.A* %a)
+ ret void
+}
+
+; Test behavior for unnamed argument without explicit alignment. In this case,
+; the first argument is referenced by the identifier %0 and the ABI requires a
+; minimum alignment of 4 bytes since struct.A contains i32s which have 4-byte
+; alignment. However, the alloca alignment will be 32 since that is the value
+; passed via the -asan-realign-stack option, which is greater than 4.
+define void @baz(%struct.A* byval) sanitize_address {
+entry:
+; CHECK-LABEL: baz
+; CHECK: call i64 @__asan_stack_malloc
+; CHECK: alloca i8, i64 {{.*}} align 32
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
+; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
+; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %0
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}}[[aBytePtr]],{{[^,]+}}, i32 4
+; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: ret void
+
+ %call = call i32 @bar(%struct.A* %0)
+ ret void
+}
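What the CHECK lines in this new test pin down: ASan copies a byval argument into its redzoned fake stack frame and redirects the callee-visible uses at the copy. A simplified sketch of the rewritten entry block (names and sizes are illustrative, not the pass's exact output):

    %frame = call i64 @__asan_stack_malloc_1(i64 96)          ; redzoned frame
    %copy  = inttoptr i64 %addr to %struct.A*
    ; copy the caller's bytes into the frame at the argument's alignment
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %copy.i8, i8* %a.i8, i64 32, i32 64, i1 false)
    %call  = call i32 @bar(%struct.A* %copy)                  ; %a replaced by %copy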
diff --git a/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll b/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
new file mode 100644
index 0000000000000..9b6e3db9050f1
--- /dev/null
+++ b/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
@@ -0,0 +1,37 @@
+; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
+;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
+;; verify that dfsan handles these intrinsics properly once they have been
+;; added to that class hierarchy.
+
+declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+
+define void @test_memcpy(i8* nocapture, i8* nocapture) {
+ ; CHECK-LABEL: dfs$test_memcpy
+ ; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 16, i32 1)
+ ; CHECK: ret void
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 16, i32 1)
+ ret void
+}
+
+define void @test_memmove(i8* nocapture, i8* nocapture) {
+ ; CHECK-LABEL: dfs$test_memmove
+ ; CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 16, i32 1)
+ ; CHECK: ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 16, i32 1)
+ ret void
+}
+
+define void @test_memset(i8* nocapture) {
+ ; CHECK-LABEL: dfs$test_memset
+ ; CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %0, i8 88, i64 16, i32 1)
+ ; CHECK: ret void
+ call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %0, i8 88, i64 16, i32 1)
+ ret void
+}
diff --git a/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll b/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
index 3457cfc7e278d..344ad86e99e40 100644
--- a/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
+++ b/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
@@ -234,6 +234,39 @@ entry:
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Ensure that esan doesn't convert element atomic memory intrinsics to
+; calls.
+
+declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+
+define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) {
+ ; CHECK-LABEL: elementAtomic_memCpyTest
+ ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ret void
+}
+
+define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) {
+ ; CHECK-LABEL: elementAtomic_memMoveTest
+ ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ret void
+}
+
+define void @elementAtomic_memSetTest(i8* nocapture %x) {
+ ; CHECK-LABEL: elementAtomic_memSetTest
+ ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
+ ret void
+}
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Top-level:
; CHECK: define internal void @esan.module_ctor()
diff --git a/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll b/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll
index 1c5978e52864c..22c8d5c59a167 100644
--- a/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll
+++ b/test/Instrumentation/EfficiencySanitizer/working_set_slow.ll
@@ -251,6 +251,38 @@ entry:
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Ensure that esan doesn't convert element atomic memory intrinsics to
+; calls.
+
+declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+
+define void @elementAtomic_memCpyTest(i8* nocapture %x, i8* nocapture %y) {
+ ; CHECK-LABEL: elementAtomic_memCpyTest
+ ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ret void
+}
+
+define void @elementAtomic_memMoveTest(i8* nocapture %x, i8* nocapture %y) {
+ ; CHECK-LABEL: elementAtomic_memMoveTest
+ ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 1 %y, i64 16, i32 1)
+ ret void
+}
+
+define void @elementAtomic_memSetTest(i8* nocapture %x) {
+ ; CHECK-LABEL: elementAtomic_memSetTest
+ ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 77, i64 16, i32 1)
+ ret void
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Top-level:
; CHECK: define internal void @esan.module_ctor()
diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll
index ffb239a152563..47912b5b6901d 100644
--- a/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -238,6 +238,41 @@ declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
; CHECK: call i8* @__msan_memmove
; CHECK: ret void
+;; ------------
+;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
+;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
+;; verify that MSAN handles these intrinsics properly once they have been
+;; added to that class hierarchy.
+declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+
+define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
+ ; CHECK-LABEL: atomic_memcpy
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+ ret void
+}
+
+define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
+ ; CHECK-LABEL: atomic_memmove
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+ ret void
+}
+
+define void @atomic_memset(i8* nocapture %x) nounwind {
+ ; CHECK-LABEL: atomic_memset
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
+ ret void
+}
+
+;; ------------
+
; Check that we propagate shadow for "select"
diff --git a/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll b/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll
new file mode 100644
index 0000000000000..badf07db2c155
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll
@@ -0,0 +1,22 @@
+; Test -sanitizer-coverage-trace-compares=1 API declarations on a non-x86_64 arch
+; RUN: opt < %s -sancov -sanitizer-coverage-level=1 -sanitizer-coverage-trace-compares=1 -S | FileCheck %s
+
+target triple = "x86_32-unknown-linux-gnu"
+define i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+; CHECK: declare void @__sanitizer_cov_trace_pc_indir(i64)
+; CHECK: declare void @__sanitizer_cov_trace_cmp1(i8, i8)
+; CHECK: declare void @__sanitizer_cov_trace_cmp2(i16, i16)
+; CHECK: declare void @__sanitizer_cov_trace_cmp4(i32, i32)
+; CHECK: declare void @__sanitizer_cov_trace_cmp8(i64, i64)
+; CHECK: declare void @__sanitizer_cov_trace_div4(i32)
+; CHECK: declare void @__sanitizer_cov_trace_div8(i64)
+; CHECK: declare void @__sanitizer_cov_trace_gep(i64)
+; CHECK: declare void @__sanitizer_cov_trace_switch(i64, i64*)
+; CHECK: declare void @__sanitizer_cov_trace_pc()
+; CHECK: declare void @__sanitizer_cov_trace_pc_guard(i32*)
+; CHECK: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
+; CHECK-NOT: declare
diff --git a/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll b/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll
new file mode 100644
index 0000000000000..16689f9831d8e
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll
@@ -0,0 +1,22 @@
+; Test -sanitizer-coverage-trace-compares=1 API declarations on x86_64
+; RUN: opt < %s -sancov -sanitizer-coverage-level=1 -sanitizer-coverage-trace-compares=1 -S | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @foo() #0 {
+entry:
+ ret i32 0
+}
+
+; CHECK: declare void @__sanitizer_cov_trace_pc_indir(i64)
+; CHECK: declare void @__sanitizer_cov_trace_cmp1(i8 zeroext, i8 zeroext)
+; CHECK: declare void @__sanitizer_cov_trace_cmp2(i16 zeroext, i16 zeroext)
+; CHECK: declare void @__sanitizer_cov_trace_cmp4(i32 zeroext, i32 zeroext)
+; CHECK: declare void @__sanitizer_cov_trace_cmp8(i64, i64)
+; CHECK: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
+; CHECK: declare void @__sanitizer_cov_trace_div8(i64)
+; CHECK: declare void @__sanitizer_cov_trace_gep(i64)
+; CHECK: declare void @__sanitizer_cov_trace_switch(i64, i64*)
+; CHECK: declare void @__sanitizer_cov_trace_pc()
+; CHECK: declare void @__sanitizer_cov_trace_pc_guard(i32*)
+; CHECK: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
+; CHECK-NOT: declare
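The only delta against the x86_32 file above is the zeroext on the sub-word arguments: on x86_64 the pass marks the i8/i16/i32 callback parameters zeroext so the compiler and the sanitizer runtime agree the values arrive zero-extended, while other targets leave the attribute off. Side by side:

    ; x86_64: declare void @__sanitizer_cov_trace_cmp1(i8 zeroext, i8 zeroext)
    ; x86_32: declare void @__sanitizer_cov_trace_cmp1(i8, i8)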
diff --git a/test/Linker/pr26037.ll b/test/Linker/pr26037.ll
index 0e6da17e9fb7e..da771669574f3 100644
--- a/test/Linker/pr26037.ll
+++ b/test/Linker/pr26037.ll
@@ -44,13 +44,13 @@ entry:
!7 = !{null}
!8 = distinct !DISubprogram(name: "b", linkageName: "_ZN1A1bEv", scope: !5, file: !1, line: 8, type: !6, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
!9 = !{!10, !16}
-!10 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !8, entity: !4, line: 8)
+!10 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !8, entity: !4, file: !1, line: 8)
!11 = !{i32 2, !"Dwarf Version", i32 4}
!12 = !{i32 2, !"Debug Info Version", i32 3}
!13 = !{!"clang version 3.8.0 (trunk 256934) (llvm/trunk 256936)"}
!14 = !DILocation(line: 7, column: 12, scope: !4)
!15 = !DILocation(line: 8, column: 24, scope: !8)
-!16 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !17, entity: !19, line: 8)
+!16 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !17, entity: !19, file: !1, line: 8)
!17 = distinct !DILexicalBlock(scope: !18, file: !1, line: 9, column: 8)
!18 = distinct !DISubprogram(name: "c", linkageName: "_ZN1A1cEv", scope: !5, file: !1, line: 9, type: !6, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
!19 = distinct !DILexicalBlock(scope: !20, file: !1, line: 10, column: 8)
diff --git a/test/MC/AArch64/coff-relocations.s b/test/MC/AArch64/coff-relocations.s
new file mode 100644
index 0000000000000..221ecfd4cd41f
--- /dev/null
+++ b/test/MC/AArch64/coff-relocations.s
@@ -0,0 +1,52 @@
+; RUN: llvm-mc -triple aarch64-windows -filetype obj -o - %s | \
+; RUN: llvm-readobj -r - | FileCheck %s
+
+; IMAGE_REL_ARM64_ADDR32
+.Linfo_foo:
+ .asciz "foo"
+ .long foo
+
+; IMAGE_REL_ARM64_ADDR32NB
+.long func@IMGREL
+
+; IMAGE_REL_ARM64_ADDR64
+.globl struc
+struc:
+ .quad arr
+
+; IMAGE_REL_ARM64_BRANCH26
+b target
+
+; IMAGE_REL_ARM64_PAGEBASE_REL21
+adrp x0, foo
+
+; IMAGE_REL_ARM64_PAGEOFFSET_12A
+add x0, x0, :lo12:foo
+
+; IMAGE_REL_ARM64_PAGEOFFSET_12L
+ldr x0, [x0, :lo12:foo]
+
+; IMAGE_REL_ARM64_SECREL
+.secrel32 .Linfo_bar
+.Linfo_bar:
+
+; IMAGE_REL_ARM64_SECTION
+.secidx func
+
+
+; CHECK: Format: COFF-ARM64
+; CHECK: Arch: aarch64
+; CHECK: AddressSize: 64bit
+; CHECK: Relocations [
+; CHECK: Section (1) .text {
+; CHECK: 0x4 IMAGE_REL_ARM64_ADDR32 foo
+; CHECK: 0x8 IMAGE_REL_ARM64_ADDR32NB func
+; CHECK: 0xC IMAGE_REL_ARM64_ADDR64 arr
+; CHECK: 0x14 IMAGE_REL_ARM64_BRANCH26 target
+; CHECK: 0x18 IMAGE_REL_ARM64_PAGEBASE_REL21 foo
+; CHECK: 0x1C IMAGE_REL_ARM64_PAGEOFFSET_12A foo
+; CHECK: 0x20 IMAGE_REL_ARM64_PAGEOFFSET_12L foo
+; CHECK: 0x24 IMAGE_REL_ARM64_SECREL .text
+; CHECK: 0x28 IMAGE_REL_ARM64_SECTION func
+; CHECK: }
+; CHECK: ]
diff --git a/test/MC/AArch64/invalid-instructions-spellcheck.s b/test/MC/AArch64/invalid-instructions-spellcheck.s
new file mode 100644
index 0000000000000..8acb285ac9a6d
--- /dev/null
+++ b/test/MC/AArch64/invalid-instructions-spellcheck.s
@@ -0,0 +1,37 @@
+// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple=aarch64 -mattr=-neon -show-encoding < %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-NEON
+
+// This tests the mnemonic spell checker.
+
+// First check what happens when an instruction is omitted:
+
+ w1, w2, w3
+
+// CHECK: error: unknown token in expression
+// CHECK-NEXT: w1, w2, w3
+// CHECK-NEXT: ^
+// CHECK-NEXT: error: invalid operand
+// CHECK-NEXT: w1, w2, w3
+// CHECK-NEXT: ^
+
+// We don't want to see a suggestion here; the edit distance is too large to
+// give sensible suggestions:
+
+ addddddddd w1, w2, w3
+
+// CHECK: error: unrecognized instruction mnemonic
+// CHECK-NEXT: addddddddd w1, w2, w3
+// CHECK-NEXT: ^
+
+ addd w1, w2, w3
+
+// CHECK: error: unrecognized instruction mnemonic, did you mean: add, addp, adds, addv, fadd, madd?
+// CHECK-NEXT: addd w1, w2, w3
+// CHECK-NEXT: ^
+
+// Instructions 'addv' and 'addp' are only available when NEON is enabled, so we
+// don't want to see them here:
+
+// CHECK-NO-NEON: error: unrecognized instruction mnemonic, did you mean: add, adds, fadd, madd?
+// CHECK-NO-NEON-NEXT: addd w1, w2, w3
+// CHECK-NO-NEON-NEXT: ^
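The two prefixes document that the suggestion list is computed against the mnemonics valid for the active feature set, not the full instruction table, so disabling NEON drops addv and addp from the candidates:

    addd w1, w2, w3    // +neon: did you mean: add, addp, adds, addv, fadd, madd?
    addd w1, w2, w3    // -neon: did you mean: add, adds, fadd, madd?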
diff --git a/test/MC/AMDGPU/gfx9_asm_all.s b/test/MC/AMDGPU/gfx9_asm_all.s
index 56484a37bdcea..40bc2f8e159a1 100644
--- a/test/MC/AMDGPU/gfx9_asm_all.s
+++ b/test/MC/AMDGPU/gfx9_asm_all.s
@@ -105392,3 +105392,354 @@ v_mad_mixlo_f16 v5, |v1|, |v2|, |v3|
v_mad_mixlo_f16 v5, v1, v2, v3 clamp
// CHECK: [0x05,0xc0,0xa1,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v255, v1, v2, v3
+// CHECK: [0xff,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v255, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0xff,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, s1, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, s101, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x65,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x66,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x67,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x6a,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x6b,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, m0, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x7c,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x7e,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x7f,0x04,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v255, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xff,0x0f,0x1c]
+
+v_pk_mad_i16 v5, v1, s2, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, s101, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xcb,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, flat_scratch_lo, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xcd,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, flat_scratch_hi, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xcf,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, vcc_lo, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xd5,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, vcc_hi, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xd7,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, m0, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xf9,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, exec_lo, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xfd,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, exec_hi, v3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0xff,0x0c,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v255
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0xfe,0x1f]
+
+v_pk_mad_i16 v5, v1, v2, s3
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x18]
+
+v_pk_mad_i16 v5, v1, v2, s101
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x96,0x19]
+
+v_pk_mad_i16 v5, v1, v2, flat_scratch_lo
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x9a,0x19]
+
+v_pk_mad_i16 v5, v1, v2, flat_scratch_hi
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x9e,0x19]
+
+v_pk_mad_i16 v5, v1, v2, vcc_lo
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0xaa,0x19]
+
+v_pk_mad_i16 v5, v1, v2, vcc_hi
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0xae,0x19]
+
+v_pk_mad_i16 v5, v1, v2, m0
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0xf2,0x19]
+
+v_pk_mad_i16 v5, v1, v2, exec_lo
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0xfa,0x19]
+
+v_pk_mad_i16 v5, v1, v2, exec_hi
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0xfe,0x19]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel:[0,0,0]
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel:[1,0,0]
+// CHECK: [0x05,0x48,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel:[0,1,0]
+// CHECK: [0x05,0x50,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel:[0,0,1]
+// CHECK: [0x05,0x60,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel:[1,1,1]
+// CHECK: [0x05,0x78,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[1,1,1]
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[0,0,0]
+// CHECK: [0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x04]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[1,0,0]
+// CHECK: [0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x0c]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[0,1,0]
+// CHECK: [0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x14]
+
+v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[0,0,1]
+// CHECK: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x04]
+
+v_pk_mad_i16 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0xc0,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v255, v1, v2, v3
+// CHECK: [0xff,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v255, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0xff,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, s1, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, s101, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x65,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x66,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x67,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x6a,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x6b,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, m0, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x7c,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x7e,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x7f,0x04,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v255, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xff,0x0f,0x1c]
+
+v_pk_mad_u16 v5, v1, s2, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, s101, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xcb,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, flat_scratch_lo, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xcd,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, flat_scratch_hi, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xcf,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, vcc_lo, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xd5,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, vcc_hi, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xd7,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, m0, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xf9,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, exec_lo, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xfd,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, exec_hi, v3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0xff,0x0c,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v255
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0xfe,0x1f]
+
+v_pk_mad_u16 v5, v1, v2, s3
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x18]
+
+v_pk_mad_u16 v5, v1, v2, s101
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x96,0x19]
+
+v_pk_mad_u16 v5, v1, v2, flat_scratch_lo
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x9a,0x19]
+
+v_pk_mad_u16 v5, v1, v2, flat_scratch_hi
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x9e,0x19]
+
+v_pk_mad_u16 v5, v1, v2, vcc_lo
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0xaa,0x19]
+
+v_pk_mad_u16 v5, v1, v2, vcc_hi
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0xae,0x19]
+
+v_pk_mad_u16 v5, v1, v2, m0
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0xf2,0x19]
+
+v_pk_mad_u16 v5, v1, v2, exec_lo
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0xfa,0x19]
+
+v_pk_mad_u16 v5, v1, v2, exec_hi
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0xfe,0x19]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel:[0,0,0]
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel:[1,0,0]
+// CHECK: [0x05,0x48,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel:[0,1,0]
+// CHECK: [0x05,0x50,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel:[0,0,1]
+// CHECK: [0x05,0x60,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel:[1,1,1]
+// CHECK: [0x05,0x78,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[1,1,1]
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[0,0,0]
+// CHECK: [0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x04]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[1,0,0]
+// CHECK: [0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x0c]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[0,1,0]
+// CHECK: [0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x14]
+
+v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[0,0,1]
+// CHECK: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x04]
+
+v_pk_mad_u16 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0xc0,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_sub_u16 v5, v1, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v255, v1, v2
+// CHECK: [0xff,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, v255, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0xff,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, s1, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, s101, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x65,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x66,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x67,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, vcc_lo, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x6a,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, vcc_hi, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x6b,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, m0, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x7c,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, exec_lo, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x7e,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, exec_hi, v2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x7f,0x04,0x02,0x18]
+
+v_pk_sub_u16 v5, v1, v255
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xff,0x03,0x18]
+
+v_pk_sub_u16 v5, v1, s2
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, s101
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xcb,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xcd,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xcf,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xd5,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xd7,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, m0
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xf9,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xfd,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0xff,0x00,0x18]
+
+v_pk_sub_u16 v5, v1, v2 op_sel:[0,0]
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, v1, v2 op_sel:[1,0]
+// CHECK: [0x05,0x08,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, v1, v2 op_sel:[0,1]
+// CHECK: [0x05,0x10,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, v1, v2 op_sel:[1,1]
+// CHECK: [0x05,0x18,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, v1, v2 op_sel_hi:[1,1]
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_u16 v5, v1, v2 op_sel_hi:[0,0]
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x00]
+
+v_pk_sub_u16 v5, v1, v2 op_sel_hi:[1,0]
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x08]
+
+v_pk_sub_u16 v5, v1, v2 op_sel_hi:[0,1]
+// CHECK: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x10]
+
+v_pk_sub_u16 v5, v1, v2 clamp
+// CHECK: [0x05,0x80,0x8b,0xd3,0x01,0x05,0x02,0x18]
diff --git a/test/MC/AMDGPU/vop3-errs.s b/test/MC/AMDGPU/vop3-errs.s
index 7ba577049af35..855dd0b5de086 100644
--- a/test/MC/AMDGPU/vop3-errs.s
+++ b/test/MC/AMDGPU/vop3-errs.s
@@ -1,35 +1,47 @@
-// RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s
-// RUN: not llvm-mc -arch=amdgcn -mcpu=SI -show-encoding %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s --check-prefix=GFX67 --check-prefix=GCN
+// RUN: not llvm-mc -arch=amdgcn -mcpu=SI -show-encoding %s 2>&1 | FileCheck %s --check-prefix=GFX67 --check-prefix=GCN
+// RUN: not llvm-mc -arch=amdgcn -mcpu=fiji -show-encoding %s 2>&1 | FileCheck %s --check-prefix=GFX89 --check-prefix=GCN
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx900 -show-encoding %s 2>&1 | FileCheck %s --check-prefix=GFX89 --check-prefix=GCN
v_add_f32_e64 v0, v1
-// CHECK: error: too few operands for instruction
+// GCN: error: too few operands for instruction
v_div_scale_f32 v24, vcc, v22, 1.1, v22
-// CHECK: error: invalid operand for instruction
+// GCN: error: invalid operand for instruction
v_mqsad_u32_u8 v[0:3], s[2:3], v4, v[0:3]
-// CHECK: error: instruction not supported on this GPU
+// GFX67: error: instruction not supported on this GPU
+// GFX89: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[0:1], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[1:2], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[2:3], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[3:4], v[0:1], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[4:5], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[5:6], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[8:9], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
v_mqsad_pk_u16_u8 v[9:10], v[1:2], v9, v[4:5]
-// CHECK: error: destination must be different than all sources
+// GCN: error: destination must be different than all sources
+
+v_cmp_eq_f32_e64 vcc, v0, v1 mul:2
+// GCN: error: invalid operand for instruction
+
+v_cmp_le_f64_e64 vcc, v0, v1 mul:4
+// GCN: error: invalid operand for instruction
+
+v_cvt_u32_f32_e64 v0, v1 div:2
+// GCN: error: invalid operand for instruction
\ No newline at end of file
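The three added cases cover the VOP3 output modifiers (mul:2, mul:4, div:2), which apply only to instructions producing a floating-point result; a comparison writing a condition mask or a float-to-integer conversion cannot take one, hence the invalid-operand diagnostic on every target. For contrast, a form where the modifier is accepted (sketch):

    v_add_f32_e64 v0, v1, v2 mul:2   // legal: the destination is an f32 value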
diff --git a/test/MC/ARM/virtexts-thumb.s b/test/MC/ARM/virtexts-thumb.s
index d911e1dfb1d72..0ea0bafe5a865 100644
--- a/test/MC/ARM/virtexts-thumb.s
+++ b/test/MC/ARM/virtexts-thumb.s
@@ -50,7 +50,7 @@
# CHECK-THUMB: [0xde,0xf3,0x00,0x8f]
# SUBS PC, LR, #0 should have the same encoding as ERET.
-# The conditional forms can't be tested becuse the ARM assembler parser doesn't
+# The conditional forms can't be tested because the ARM assembler parser doesn't
# accept SUBS<cond> PC, LR, #<imm>, only the unconditonal form is allowed. This
# is due to the way that the custom parser handles optional operands; see the
# FIXME in ARM/AsmParser/ARMAsmParser.cpp.
diff --git a/test/MC/Disassembler/AMDGPU/gfx9_dasm_all.txt b/test/MC/Disassembler/AMDGPU/gfx9_dasm_all.txt
index 63374300504c2..188534706eb52 100644
--- a/test/MC/Disassembler/AMDGPU/gfx9_dasm_all.txt
+++ b/test/MC/Disassembler/AMDGPU/gfx9_dasm_all.txt
@@ -88934,3 +88934,408 @@
# CHECK: v_pk_sub_i16 v5, v1, v2 clamp ; encoding: [0x05,0x80,0x83,0xd3,0x01,0x05,0x02,0x18]
0x05,0x80,0x83,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v255, v1, v2, v3 ; encoding: [0xff,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0xff,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v255, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0xff,0x05,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0xff,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, s1, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, s101, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x65,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x65,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, flat_scratch_lo, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x66,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x66,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, flat_scratch_hi, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x67,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x67,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, vcc_lo, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x6a,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x6a,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, vcc_hi, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x6b,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x6b,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, m0, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x7c,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x7c,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, exec_lo, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x7e,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x7e,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, exec_hi, v2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x7f,0x04,0x0e,0x1c]
+0x05,0x40,0x80,0xd3,0x7f,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, v255, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xff,0x0f,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xff,0x0f,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, s2, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, s101, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xcb,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xcb,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, flat_scratch_lo, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xcd,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xcd,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, flat_scratch_hi, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xcf,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xcf,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, vcc_lo, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xd5,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xd5,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, vcc_hi, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xd7,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xd7,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, m0, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xf9,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xf9,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, exec_lo, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xfd,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xfd,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, exec_hi, v3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0xff,0x0c,0x1c]
+0x05,0x40,0x80,0xd3,0x01,0xff,0x0c,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v255 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0xfe,0x1f]
+0x05,0x40,0x80,0xd3,0x01,0x05,0xfe,0x1f
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, s3 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x18]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x18
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, s101 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x96,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x96,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, flat_scratch_lo ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x9a,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x9a,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, flat_scratch_hi ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x9e,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x9e,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, vcc_lo ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0xaa,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0xaa,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, vcc_hi ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0xae,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0xae,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, m0 ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0xf2,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0xf2,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, exec_lo ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0xfa,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0xfa,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, exec_hi ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0xfe,0x19]
+0x05,0x40,0x80,0xd3,0x01,0x05,0xfe,0x19
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x05,0x48,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x48,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel:[0,1,0] ; encoding: [0x05,0x50,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x50,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x05,0x60,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x60,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel:[1,1,1] ; encoding: [0x05,0x78,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x78,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[0,0,0] ; encoding: [0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x04]
+0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x04
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[1,0,0] ; encoding: [0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x0c]
+0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x0c
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[0,1,0] ; encoding: [0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x14]
+0x05,0x00,0x80,0xd3,0x01,0x05,0x0e,0x14
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 op_sel_hi:[0,0,1] ; encoding: [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x04]
+0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x04
+
+# CHECK: v_pk_mad_i16 v5, v1, v2, v3 clamp ; encoding: [0x05,0xc0,0x80,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0xc0,0x80,0xd3,0x01,0x05,0x0e,0x1c
+
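+# A reading aid inferred from the pairs above (nothing FileCheck asserts):
+# op_sel:[x,y,z] sets bits 0x08/0x10/0x20 of byte 1 and clamp sets bit 0x80;
+# op_sel_hi[2] is bit 0x40 of byte 1, while op_sel_hi[0] and op_sel_hi[1] are
+# bits 0x08 and 0x10 of byte 7. The defaults op_sel_hi:[1,1,1] account for
+# the 0x40 and 0x18 present in the base encoding
+# [0x05,0x40,0x80,0xd3,0x01,0x05,0x0e,0x1c].
+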
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v255, v1, v2, v3 ; encoding: [0xff,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0xff,0x40,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v255, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0xff,0x05,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0xff,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, s1, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, s101, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x65,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x65,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, flat_scratch_lo, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x66,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x66,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, flat_scratch_hi, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x67,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x67,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, vcc_lo, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x6a,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x6a,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, vcc_hi, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x6b,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x6b,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, m0, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x7c,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x7c,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, exec_lo, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x7e,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x7e,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, exec_hi, v2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x7f,0x04,0x0e,0x1c]
+0x05,0x40,0x89,0xd3,0x7f,0x04,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, v255, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xff,0x0f,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xff,0x0f,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, s2, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, s101, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xcb,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xcb,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, flat_scratch_lo, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xcd,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xcd,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, flat_scratch_hi, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xcf,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xcf,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, vcc_lo, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xd5,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xd5,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, vcc_hi, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xd7,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xd7,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, m0, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xf9,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xf9,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, exec_lo, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xfd,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xfd,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, exec_hi, v3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0xff,0x0c,0x1c]
+0x05,0x40,0x89,0xd3,0x01,0xff,0x0c,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v255 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0xfe,0x1f]
+0x05,0x40,0x89,0xd3,0x01,0x05,0xfe,0x1f
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, s3 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x18]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x18
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, s101 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x96,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x96,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, flat_scratch_lo ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x9a,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x9a,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, flat_scratch_hi ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x9e,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x9e,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, vcc_lo ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0xaa,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0xaa,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, vcc_hi ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0xae,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0xae,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, m0 ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0xf2,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0xf2,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, exec_lo ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0xfa,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0xfa,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, exec_hi ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0xfe,0x19]
+0x05,0x40,0x89,0xd3,0x01,0x05,0xfe,0x19
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x05,0x48,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x48,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel:[0,1,0] ; encoding: [0x05,0x50,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x50,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x05,0x60,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x60,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel:[1,1,1] ; encoding: [0x05,0x78,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0x78,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[0,0,0] ; encoding: [0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x04]
+0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x04
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[1,0,0] ; encoding: [0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x0c]
+0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x0c
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[0,1,0] ; encoding: [0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x14]
+0x05,0x00,0x89,0xd3,0x01,0x05,0x0e,0x14
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 op_sel_hi:[0,0,1] ; encoding: [0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x04]
+0x05,0x40,0x89,0xd3,0x01,0x05,0x0e,0x04
+
+# CHECK: v_pk_mad_u16 v5, v1, v2, v3 clamp ; encoding: [0x05,0xc0,0x89,0xd3,0x01,0x05,0x0e,0x1c]
+0x05,0xc0,0x89,0xd3,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v255, v1, v2 ; encoding: [0xff,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18]
+0xff,0x00,0x8b,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, v255, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0xff,0x05,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0xff,0x05,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, s1, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, s101, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x65,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x65,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, flat_scratch_lo, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x66,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x66,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, flat_scratch_hi, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x67,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x67,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, vcc_lo, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x6a,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x6a,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, vcc_hi, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x6b,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x6b,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, m0, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x7c,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x7c,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, exec_lo, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x7e,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x7e,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, exec_hi, v2 ; encoding: [0x05,0x00,0x8b,0xd3,0x7f,0x04,0x02,0x18]
+0x05,0x00,0x8b,0xd3,0x7f,0x04,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, v255 ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xff,0x03,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xff,0x03,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, s2 ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0x05,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, s101 ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xcb,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xcb,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, flat_scratch_lo ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xcd,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xcd,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, flat_scratch_hi ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xcf,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xcf,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, vcc_lo ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xd5,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xd5,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, vcc_hi ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xd7,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xd7,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, m0 ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xf9,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xf9,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, exec_lo ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xfd,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xfd,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, exec_hi ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0xff,0x00,0x18]
+0x05,0x00,0x8b,0xd3,0x01,0xff,0x00,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 op_sel:[1,0] ; encoding: [0x05,0x08,0x8b,0xd3,0x01,0x05,0x02,0x18]
+0x05,0x08,0x8b,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 op_sel:[0,1] ; encoding: [0x05,0x10,0x8b,0xd3,0x01,0x05,0x02,0x18]
+0x05,0x10,0x8b,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 op_sel:[1,1] ; encoding: [0x05,0x18,0x8b,0xd3,0x01,0x05,0x02,0x18]
+0x05,0x18,0x8b,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 op_sel_hi:[0,0] ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x00]
+0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x00
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 op_sel_hi:[1,0] ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x08]
+0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x08
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 op_sel_hi:[0,1] ; encoding: [0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x10]
+0x05,0x00,0x8b,0xd3,0x01,0x05,0x02,0x10
+
+# CHECK: v_pk_sub_u16 v5, v1, v2 clamp ; encoding: [0x05,0x80,0x8b,0xd3,0x01,0x05,0x02,0x18]
+0x05,0x80,0x8b,0xd3,0x01,0x05,0x02,0x18
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0x05,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0x05,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], s[2:3], v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x02,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x02,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], s[4:5], v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x04,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x04,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], s[100:101], v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x64,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x64,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], flat_scratch, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x66,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x66,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], vcc, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x6a,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x6a,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], exec, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x7e,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x7e,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], 0, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x80,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0x80,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], -1, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0xc1,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0xc1,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], 0.5, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0xf0,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0xf0,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], -4.0, v2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0xf7,0x04,0x0e,0x04]
+0xfc,0x00,0xe7,0xd1,0xf7,0x04,0x0e,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], s2, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0x05,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0x05,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], s101, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xcb,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xcb,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], flat_scratch_lo, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xcd,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xcd,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], flat_scratch_hi, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xcf,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xcf,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], vcc_lo, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xd5,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xd5,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], vcc_hi, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xd7,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xd7,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], m0, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xf9,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xf9,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], exec_lo, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xfd,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xfd,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], exec_hi, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xff,0x0c,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xff,0x0c,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], 0, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0x01,0x0d,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0x01,0x0d,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], -1, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0x83,0x0d,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0x83,0x0d,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], 0.5, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xe1,0x0d,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xe1,0x0d,0x04
+
+# CHECK: v_mqsad_u32_u8 v[252:255], v[1:2], -4.0, v[3:6] ; encoding: [0xfc,0x00,0xe7,0xd1,0x01,0xef,0x0d,0x04]
+0xfc,0x00,0xe7,0xd1,0x01,0xef,0x0d,0x04
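+
+# A reading aid (not asserted by the test): fifth-byte values 0x80, 0xc1,
+# 0xf0 and 0xf7 in the src0 pairs above are AMDGPU inline constants,
+# printed as 0, -1, 0.5 and -4.0 respectively.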
diff --git a/test/MC/Disassembler/Mips/mt/valid-r2-el.txt b/test/MC/Disassembler/Mips/mt/valid-r2-el.txt
index 62e7092086aa0..7025354d68474 100644
--- a/test/MC/Disassembler/Mips/mt/valid-r2-el.txt
+++ b/test/MC/Disassembler/Mips/mt/valid-r2-el.txt
@@ -10,23 +10,4 @@
0x08 0x10 0x65 0x7c # CHECK: fork $2, $3, $5
0x09 0x00 0x80 0x7c # CHECK: yield $4
0x09 0x20 0xa0 0x7c # CHECK: yield $4, $5
-0x02 0x20 0x05 0x41 # CHECK: mftr $4, $5, 0, 2, 0
-0x20 0x20 0x05 0x41 # CHECK: mftr $4, $5, 1, 0, 0
-0x21 0x20 0x00 0x41 # CHECK: mftr $4, $zero, 1, 1, 0
-0x21 0x20 0x0a 0x41 # CHECK: mftr $4, $10, 1, 1, 0
-0x22 0x20 0x0a 0x41 # CHECK: mftr $4, $10, 1, 2, 0
-0x32 0x20 0x0a 0x41 # CHECK: mftr $4, $10, 1, 2, 1
-0x23 0x20 0x1a 0x41 # CHECK: mftr $4, $26, 1, 3, 0
-0x23 0x20 0x1f 0x41 # CHECK: mftr $4, $ra, 1, 3, 0
-0x24 0x20 0x0e 0x41 # CHECK: mftr $4, $14, 1, 4, 0
-0x25 0x20 0x0f 0x41 # CHECK: mftr $4, $15, 1, 5, 0
-0x02 0x28 0x84 0x41 # CHECK: mttr $4, $5, 0, 2, 0
-0x20 0x28 0x84 0x41 # CHECK: mttr $4, $5, 1, 0, 0
-0x21 0x00 0x84 0x41 # CHECK: mttr $4, $zero, 1, 1, 0
-0x21 0x50 0x84 0x41 # CHECK: mttr $4, $10, 1, 1, 0
-0x22 0x50 0x84 0x41 # CHECK: mttr $4, $10, 1, 2, 0
-0x32 0x50 0x84 0x41 # CHECK: mttr $4, $10, 1, 2, 1
-0x23 0xd0 0x84 0x41 # CHECK: mttr $4, $26, 1, 3, 0
-0x23 0xf8 0x84 0x41 # CHECK: mttr $4, $ra, 1, 3, 0
-0x24 0x70 0x84 0x41 # CHECK: mttr $4, $14, 1, 4, 0
-0x25 0x78 0x84 0x41 # CHECK: mttr $4, $15, 1, 5, 0
+
diff --git a/test/MC/Disassembler/Mips/mt/valid-r2.txt b/test/MC/Disassembler/Mips/mt/valid-r2.txt
index 4786d8b5591f4..17c42c0614a5b 100644
--- a/test/MC/Disassembler/Mips/mt/valid-r2.txt
+++ b/test/MC/Disassembler/Mips/mt/valid-r2.txt
@@ -10,23 +10,4 @@
0x7c 0x65 0x10 0x08 # CHECK: fork $2, $3, $5
0x7c 0x80 0x00 0x09 # CHECK: yield $4
0x7c 0xa0 0x20 0x09 # CHECK: yield $4, $5
-0x41 0x05 0x20 0x02 # CHECK: mftr $4, $5, 0, 2, 0
-0x41 0x05 0x20 0x20 # CHECK: mftr $4, $5, 1, 0, 0
-0x41 0x00 0x20 0x21 # CHECK: mftr $4, $zero, 1, 1, 0
-0x41 0x0a 0x20 0x21 # CHECK: mftr $4, $10, 1, 1, 0
-0x41 0x0a 0x20 0x22 # CHECK: mftr $4, $10, 1, 2, 0
-0x41 0x0a 0x20 0x32 # CHECK: mftr $4, $10, 1, 2, 1
-0x41 0x1a 0x20 0x23 # CHECK: mftr $4, $26, 1, 3, 0
-0x41 0x1f 0x20 0x23 # CHECK: mftr $4, $ra, 1, 3, 0
-0x41 0x0e 0x20 0x24 # CHECK: mftr $4, $14, 1, 4, 0
-0x41 0x0f 0x20 0x25 # CHECK: mftr $4, $15, 1, 5, 0
-0x41 0x84 0x28 0x02 # CHECK: mttr $4, $5, 0, 2, 0
-0x41 0x84 0x28 0x20 # CHECK: mttr $4, $5, 1, 0, 0
-0x41 0x84 0x00 0x21 # CHECK: mttr $4, $zero, 1, 1, 0
-0x41 0x84 0x50 0x21 # CHECK: mttr $4, $10, 1, 1, 0
-0x41 0x84 0x50 0x22 # CHECK: mttr $4, $10, 1, 2, 0
-0x41 0x84 0x50 0x32 # CHECK: mttr $4, $10, 1, 2, 1
-0x41 0x84 0xd0 0x23 # CHECK: mttr $4, $26, 1, 3, 0
-0x41 0x84 0xf8 0x23 # CHECK: mttr $4, $ra, 1, 3, 0
-0x41 0x84 0x70 0x24 # CHECK: mttr $4, $14, 1, 4, 0
-0x41 0x84 0x78 0x25 # CHECK: mttr $4, $15, 1, 5, 0
+
diff --git a/test/MC/Disassembler/SystemZ/insns-z14.txt b/test/MC/Disassembler/SystemZ/insns-z14.txt
new file mode 100644
index 0000000000000..c73b50c1c2fbd
--- /dev/null
+++ b/test/MC/Disassembler/SystemZ/insns-z14.txt
@@ -0,0 +1,3253 @@
+# Test z14 instructions that don't have PC-relative operands.
+# RUN: llvm-mc --disassemble %s -triple=s390x-linux-gnu -mcpu=z14 \
+# RUN: | FileCheck %s
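+#
+# Each case below is a pair: a CHECK line giving the expected disassembly,
+# followed by the raw bytes fed to the disassembler. As a reading aid
+# (assuming the standard z/Architecture RXY-a field layout; the test itself
+# only checks the printed text), the first pair decomposes as:
+#   0xe3 0x00 0x00 0x00 0x80 0x38
+#   op=0xe3, r1=0|x2=0, b2=0|dl2=0x000, dh2=0x80, op2=0x38
+#   displacement = dh2:dl2 = 0x80000 -> sign-extended 20 bits = -524288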
+
+# CHECK: agh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x38
+
+# CHECK: agh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x38
+
+# CHECK: agh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x38
+
+# CHECK: agh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x38
+
+# CHECK: agh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x38
+
+# CHECK: agh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x38
+
+# CHECK: agh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x38
+
+# CHECK: agh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x38
+
+# CHECK: agh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x38
+
+# CHECK: agh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x38
+
+# CHECK: bi -524288
+0xe3 0xf0 0x00 0x00 0x80 0x47
+
+# CHECK: bi -1
+0xe3 0xf0 0x0f 0xff 0xff 0x47
+
+# CHECK: bi 0
+0xe3 0xf0 0x00 0x00 0x00 0x47
+
+# CHECK: bi 1
+0xe3 0xf0 0x00 0x01 0x00 0x47
+
+# CHECK: bi 524287
+0xe3 0xf0 0x0f 0xff 0x7f 0x47
+
+# CHECK: bi 0(%r1)
+0xe3 0xf0 0x10 0x00 0x00 0x47
+
+# CHECK: bi 0(%r15)
+0xe3 0xf0 0xf0 0x00 0x00 0x47
+
+# CHECK: bi 524287(%r1,%r15)
+0xe3 0xf1 0xff 0xff 0x7f 0x47
+
+# CHECK: bi 524287(%r15,%r1)
+0xe3 0xff 0x1f 0xff 0x7f 0x47
+
+# CHECK: bic 0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x47
+
+# CHECK: bic 0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x47
+
+# CHECK: bic 0, 0
+0xe3 0x00 0x00 0x00 0x00 0x47
+
+# CHECK: bic 0, 1
+0xe3 0x00 0x00 0x01 0x00 0x47
+
+# CHECK: bic 0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x47
+
+# CHECK: bic 0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x47
+
+# CHECK: bic 0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x47
+
+# CHECK: bic 0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x47
+
+# CHECK: bic 0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x47
+
+# CHECK: bio 0(%r15)
+0xe3 0x10 0xf0 0x00 0x00 0x47
+
+# CHECK: bih 0(%r15)
+0xe3 0x20 0xf0 0x00 0x00 0x47
+
+# CHECK: binle 0(%r15)
+0xe3 0x30 0xf0 0x00 0x00 0x47
+
+# CHECK: bil 0(%r15)
+0xe3 0x40 0xf0 0x00 0x00 0x47
+
+# CHECK: binhe 0(%r15)
+0xe3 0x50 0xf0 0x00 0x00 0x47
+
+# CHECK: bilh 0(%r15)
+0xe3 0x60 0xf0 0x00 0x00 0x47
+
+# CHECK: bine 0(%r15)
+0xe3 0x70 0xf0 0x00 0x00 0x47
+
+# CHECK: bie 0(%r15)
+0xe3 0x80 0xf0 0x00 0x00 0x47
+
+# CHECK: binlh 0(%r15)
+0xe3 0x90 0xf0 0x00 0x00 0x47
+
+# CHECK: bihe 0(%r15)
+0xe3 0xa0 0xf0 0x00 0x00 0x47
+
+# CHECK: binl 0(%r15)
+0xe3 0xb0 0xf0 0x00 0x00 0x47
+
+# CHECK: bile 0(%r15)
+0xe3 0xc0 0xf0 0x00 0x00 0x47
+
+# CHECK: binh 0(%r15)
+0xe3 0xd0 0xf0 0x00 0x00 0x47
+
+# CHECK: bino 0(%r15)
+0xe3 0xe0 0xf0 0x00 0x00 0x47
+
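+# A reading aid inferred from the encodings above (not asserted by the
+# test): the high nibble of the second byte is the 4-bit condition mask M1,
+# so bio..bino cover masks 1..14, bic takes the mask as an explicit operand,
+# and bi is the unconditional mask-15 form (second byte 0xf0).
+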
+# CHECK: irbm %r0, %r0
+0xb9 0xac 0x00 0x00
+
+# CHECK: irbm %r0, %r15
+0xb9 0xac 0x00 0x0f
+
+# CHECK: irbm %r15, %r0
+0xb9 0xac 0x00 0xf0
+
+# CHECK: irbm %r7, %r8
+0xb9 0xac 0x00 0x78
+
+# CHECK: irbm %r15, %r15
+0xb9 0xac 0x00 0xff
+
+# CHECK: kma %r2, %r2, %r2
+0xb9 0x29 0x20 0x22
+
+# CHECK: kma %r2, %r8, %r14
+0xb9 0x29 0x80 0x2e
+
+# CHECK: kma %r14, %r8, %r2
+0xb9 0x29 0x80 0xe2
+
+# CHECK: kma %r6, %r8, %r10
+0xb9 0x29 0x80 0x6a
+
+# CHECK: lgg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x4c
+
+# CHECK: lgg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x4c
+
+# CHECK: lgg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x4c
+
+# CHECK: lgg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x4c
+
+# CHECK: lgg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x4c
+
+# CHECK: lgg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x4c
+
+# CHECK: lgg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x4c
+
+# CHECK: lgg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x4c
+
+# CHECK: lgg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x4c
+
+# CHECK: lgg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x4c
+
+# CHECK: lgsc %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x4d
+
+# CHECK: lgsc %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x4d
+
+# CHECK: lgsc %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x4d
+
+# CHECK: lgsc %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x4d
+
+# CHECK: lgsc %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x4d
+
+# CHECK: lgsc %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x4d
+
+# CHECK: lgsc %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x4d
+
+# CHECK: lgsc %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x4d
+
+# CHECK: lgsc %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x4d
+
+# CHECK: llgfsg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x48
+
+# CHECK: llgfsg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x48
+
+# CHECK: llgfsg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x48
+
+# CHECK: llgfsg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x48
+
+# CHECK: llgfsg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x48
+
+# CHECK: llgfsg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x48
+
+# CHECK: llgfsg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x48
+
+# CHECK: llgfsg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x48
+
+# CHECK: llgfsg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x48
+
+# CHECK: llgfsg %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x48
+
+# CHECK: mg %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x84
+
+# CHECK: mg %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x84
+
+# CHECK: mg %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x84
+
+# CHECK: mg %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x84
+
+# CHECK: mg %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x84
+
+# CHECK: mg %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x84
+
+# CHECK: mg %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x84
+
+# CHECK: mg %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x84
+
+# CHECK: mg %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x84
+
+# CHECK: mg %r14, 0
+0xe3 0xe0 0x00 0x00 0x00 0x84
+
+# CHECK: mgh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x3c
+
+# CHECK: mgh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x3c
+
+# CHECK: mgh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x3c
+
+# CHECK: mgh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x3c
+
+# CHECK: mgh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x3c
+
+# CHECK: mgh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x3c
+
+# CHECK: mgh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x3c
+
+# CHECK: mgh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x3c
+
+# CHECK: mgh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x3c
+
+# CHECK: mgh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x3c
+
+# CHECK: mgrk %r0, %r0, %r0
+0xb9 0xec 0x00 0x00
+
+# CHECK: mgrk %r0, %r0, %r15
+0xb9 0xec 0xf0 0x00
+
+# CHECK: mgrk %r0, %r15, %r0
+0xb9 0xec 0x00 0x0f
+
+# CHECK: mgrk %r14, %r0, %r0
+0xb9 0xec 0x00 0xe0
+
+# CHECK: mgrk %r6, %r8, %r9
+0xb9 0xec 0x90 0x68
+
+# CHECK: msc %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x53
+
+# CHECK: msc %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x53
+
+# CHECK: msc %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x53
+
+# CHECK: msc %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x53
+
+# CHECK: msc %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x53
+
+# CHECK: msc %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x53
+
+# CHECK: msc %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x53
+
+# CHECK: msc %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x53
+
+# CHECK: msc %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x53
+
+# CHECK: msc %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x53
+
+# CHECK: msgc %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x83
+
+# CHECK: msgc %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x83
+
+# CHECK: msgc %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x83
+
+# CHECK: msgc %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x83
+
+# CHECK: msgc %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x83
+
+# CHECK: msgc %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x83
+
+# CHECK: msgc %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x83
+
+# CHECK: msgc %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x83
+
+# CHECK: msgc %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x83
+
+# CHECK: msgc %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x83
+
+# CHECK: msrkc %r0, %r0, %r0
+0xb9 0xfd 0x00 0x00
+
+# CHECK: msrkc %r0, %r0, %r15
+0xb9 0xfd 0xf0 0x00
+
+# CHECK: msrkc %r0, %r15, %r0
+0xb9 0xfd 0x00 0x0f
+
+# CHECK: msrkc %r15, %r0, %r0
+0xb9 0xfd 0x00 0xf0
+
+# CHECK: msrkc %r7, %r8, %r9
+0xb9 0xfd 0x90 0x78
+
+# CHECK: msgrkc %r0, %r0, %r0
+0xb9 0xed 0x00 0x00
+
+# CHECK: msgrkc %r0, %r0, %r15
+0xb9 0xed 0xf0 0x00
+
+# CHECK: msgrkc %r0, %r15, %r0
+0xb9 0xed 0x00 0x0f
+
+# CHECK: msgrkc %r15, %r0, %r0
+0xb9 0xed 0x00 0xf0
+
+# CHECK: msgrkc %r7, %r8, %r9
+0xb9 0xed 0x90 0x78
+
+# CHECK: sgh %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x39
+
+# CHECK: sgh %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x39
+
+# CHECK: sgh %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x39
+
+# CHECK: sgh %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x39
+
+# CHECK: sgh %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x39
+
+# CHECK: sgh %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x39
+
+# CHECK: sgh %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x39
+
+# CHECK: sgh %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x39
+
+# CHECK: sgh %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x39
+
+# CHECK: sgh %r15, 0
+0xe3 0xf0 0x00 0x00 0x00 0x39
+
+# CHECK: stgsc %r0, -524288
+0xe3 0x00 0x00 0x00 0x80 0x49
+
+# CHECK: stgsc %r0, -1
+0xe3 0x00 0x0f 0xff 0xff 0x49
+
+# CHECK: stgsc %r0, 0
+0xe3 0x00 0x00 0x00 0x00 0x49
+
+# CHECK: stgsc %r0, 1
+0xe3 0x00 0x00 0x01 0x00 0x49
+
+# CHECK: stgsc %r0, 524287
+0xe3 0x00 0x0f 0xff 0x7f 0x49
+
+# CHECK: stgsc %r0, 0(%r1)
+0xe3 0x00 0x10 0x00 0x00 0x49
+
+# CHECK: stgsc %r0, 0(%r15)
+0xe3 0x00 0xf0 0x00 0x00 0x49
+
+# CHECK: stgsc %r0, 524287(%r1,%r15)
+0xe3 0x01 0xff 0xff 0x7f 0x49
+
+# CHECK: stgsc %r0, 524287(%r15,%r1)
+0xe3 0x0f 0x1f 0xff 0x7f 0x49
+
+# CHECK: vap %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x71
+
+# CHECK: vap %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x71
+
+# CHECK: vap %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x71
+
+# CHECK: vap %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x71
+
+# CHECK: vap %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x71
+
+# CHECK: vap %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x71
+
+# CHECK: vap %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x71
+
+# CHECK: vbperm %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x85
+
+# CHECK: vbperm %v0, %v0, %v15
+0xe7 0x00 0xf0 0x00 0x00 0x85
+
+# CHECK: vbperm %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x02 0x85
+
+# CHECK: vbperm %v0, %v15, %v0
+0xe7 0x0f 0x00 0x00 0x00 0x85
+
+# CHECK: vbperm %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x04 0x85
+
+# CHECK: vbperm %v15, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x00 0x85
+
+# CHECK: vbperm %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x08 0x85
+
+# CHECK: vbperm %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x85
+
+# CHECK: vcp %v0, %v0, 0
+0xe6 0x00 0x00 0x00 0x00 0x77
+
+# CHECK: vcp %v0, %v0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x77
+
+# CHECK: vcp %v15, %v0, 0
+0xe6 0x0f 0x00 0x00 0x00 0x77
+
+# CHECK: vcp %v31, %v0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x77
+
+# CHECK: vcp %v0, %v15, 0
+0xe6 0x00 0xf0 0x00 0x00 0x77
+
+# CHECK: vcp %v0, %v31, 0
+0xe6 0x00 0xf0 0x00 0x02 0x77
+
+# CHECK: vcp %v3, %v18, 4
+0xe6 0x03 0x20 0x40 0x02 0x77
+
+# CHECK: vcvb %r0, %v0, 0
+0xe6 0x00 0x00 0x00 0x00 0x50
+
+# CHECK: vcvb %r0, %v0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x50
+
+# CHECK: vcvb %r15, %v0, 0
+0xe6 0xf0 0x00 0x00 0x00 0x50
+
+# CHECK: vcvb %r0, %v15, 0
+0xe6 0x0f 0x00 0x00 0x00 0x50
+
+# CHECK: vcvb %r0, %v31, 0
+0xe6 0x0f 0x00 0x00 0x04 0x50
+
+# CHECK: vcvb %r3, %v18, 4
+0xe6 0x32 0x00 0x40 0x04 0x50
+
+# CHECK: vcvbg %r0, %v0, 0
+0xe6 0x00 0x00 0x00 0x00 0x52
+
+# CHECK: vcvbg %r0, %v0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x52
+
+# CHECK: vcvbg %r15, %v0, 0
+0xe6 0xf0 0x00 0x00 0x00 0x52
+
+# CHECK: vcvbg %r0, %v15, 0
+0xe6 0x0f 0x00 0x00 0x00 0x52
+
+# CHECK: vcvbg %r0, %v31, 0
+0xe6 0x0f 0x00 0x00 0x04 0x52
+
+# CHECK: vcvbg %r3, %v18, 4
+0xe6 0x32 0x00 0x40 0x04 0x52
+
+# CHECK: vcvd %v0, %r0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x58
+
+# CHECK: vcvd %v0, %r0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x58
+
+# CHECK: vcvd %v0, %r0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x58
+
+# CHECK: vcvd %v0, %r15, 0, 0
+0xe6 0x0f 0x00 0x00 0x00 0x58
+
+# CHECK: vcvd %v15, %r0, 0, 0
+0xe6 0xf0 0x00 0x00 0x00 0x58
+
+# CHECK: vcvd %v31, %r0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x58
+
+# CHECK: vcvd %v18, %r9, 52, 11
+0xe6 0x29 0x00 0xb3 0x48 0x58
+
+# CHECK: vcvdg %v0, %r0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x5a
+
+# CHECK: vcvdg %v0, %r0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x5a
+
+# CHECK: vcvdg %v0, %r0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x5a
+
+# CHECK: vcvdg %v0, %r15, 0, 0
+0xe6 0x0f 0x00 0x00 0x00 0x5a
+
+# CHECK: vcvdg %v15, %r0, 0, 0
+0xe6 0xf0 0x00 0x00 0x00 0x5a
+
+# CHECK: vcvdg %v31, %r0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x5a
+
+# CHECK: vcvdg %v18, %r9, 52, 11
+0xe6 0x29 0x00 0xb3 0x48 0x5a
+
+# CHECK: vdp %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x7a
+
+# CHECK: vdp %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x7a
+
+# CHECK: vdp %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x7a
+
+# CHECK: vdp %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x7a
+
+# CHECK: vdp %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x7a
+
+# CHECK: vdp %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x7a
+
+# CHECK: vdp %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x7a
+
+# CHECK: vfasb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xe3
+
+# CHECK: vfasb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xe3
+
+# CHECK: vfasb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xe3
+
+# CHECK: vfasb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xe3
+
+# CHECK: vfasb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xe3
+
+# CHECK: vfcesb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xe8
+
+# CHECK: vfcesb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xe8
+
+# CHECK: vfcesb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xe8
+
+# CHECK: vfcesb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xe8
+
+# CHECK: vfcesb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xe8
+
+# CHECK: vfcesbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x10 0x20 0xe8
+
+# CHECK: vfcesbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x10 0x22 0xe8
+
+# CHECK: vfcesbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x10 0x24 0xe8
+
+# CHECK: vfcesbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x10 0x28 0xe8
+
+# CHECK: vfcesbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x10 0x2a 0xe8
+
+# CHECK: vfchsb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xeb
+
+# CHECK: vfchsb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xeb
+
+# CHECK: vfchsb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xeb
+
+# CHECK: vfchsb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xeb
+
+# CHECK: vfchsb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xeb
+
+# CHECK: vfchsbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x10 0x20 0xeb
+
+# CHECK: vfchsbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x10 0x22 0xeb
+
+# CHECK: vfchsbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x10 0x24 0xeb
+
+# CHECK: vfchsbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x10 0x28 0xeb
+
+# CHECK: vfchsbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x10 0x2a 0xeb
+
+# CHECK: vfchesb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xea
+
+# CHECK: vfchesb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xea
+
+# CHECK: vfchesb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xea
+
+# CHECK: vfchesb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xea
+
+# CHECK: vfchesb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xea
+
+# CHECK: vfchesbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x10 0x20 0xea
+
+# CHECK: vfchesbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x10 0x22 0xea
+
+# CHECK: vfchesbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x10 0x24 0xea
+
+# CHECK: vfchesbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x10 0x28 0xea
+
+# CHECK: vfchesbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x10 0x2a 0xea
+
+# CHECK: vfdsb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xe5
+
+# CHECK: vfdsb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xe5
+
+# CHECK: vfdsb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xe5
+
+# CHECK: vfdsb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xe5
+
+# CHECK: vfdsb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xe5
+
+# CHECK: vfisb %v0, %v0, 0, 0
+0xe7 0x00 0x00 0x00 0x20 0xc7
+
+# CHECK: vfisb %v0, %v0, 0, 15
+0xe7 0x00 0x00 0xf0 0x20 0xc7
+
+# CHECK: vfisb %v0, %v0, 4, 0
+0xe7 0x00 0x00 0x04 0x20 0xc7
+
+# CHECK: vfisb %v0, %v0, 7, 0
+0xe7 0x00 0x00 0x07 0x20 0xc7
+
+# CHECK: vfisb %v0, %v31, 0, 0
+0xe7 0x0f 0x00 0x00 0x24 0xc7
+
+# CHECK: vfisb %v31, %v0, 0, 0
+0xe7 0xf0 0x00 0x00 0x28 0xc7
+
+# CHECK: vfisb %v14, %v17, 4, 10
+0xe7 0xe1 0x00 0xa4 0x24 0xc7
+
+# CHECK: vfkedb %v0, %v0, %v0
+0xe7 0x00 0x00 0x04 0x30 0xe8
+
+# CHECK: vfkedb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x04 0x32 0xe8
+
+# CHECK: vfkedb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x04 0x34 0xe8
+
+# CHECK: vfkedb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x04 0x38 0xe8
+
+# CHECK: vfkedb %v18, %v3, %v20
+0xe7 0x23 0x40 0x04 0x3a 0xe8
+
+# CHECK: vfkedbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x14 0x30 0xe8
+
+# CHECK: vfkedbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x14 0x32 0xe8
+
+# CHECK: vfkedbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x14 0x34 0xe8
+
+# CHECK: vfkedbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x14 0x38 0xe8
+
+# CHECK: vfkedbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x14 0x3a 0xe8
+
+# CHECK: vfkesb %v0, %v0, %v0
+0xe7 0x00 0x00 0x04 0x20 0xe8
+
+# CHECK: vfkesb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x04 0x22 0xe8
+
+# CHECK: vfkesb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x04 0x24 0xe8
+
+# CHECK: vfkesb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x04 0x28 0xe8
+
+# CHECK: vfkesb %v18, %v3, %v20
+0xe7 0x23 0x40 0x04 0x2a 0xe8
+
+# CHECK: vfkesbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x14 0x20 0xe8
+
+# CHECK: vfkesbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x14 0x22 0xe8
+
+# CHECK: vfkesbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x14 0x24 0xe8
+
+# CHECK: vfkesbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x14 0x28 0xe8
+
+# CHECK: vfkesbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x14 0x2a 0xe8
+
+# CHECK: vfkhdb %v0, %v0, %v0
+0xe7 0x00 0x00 0x04 0x30 0xeb
+
+# CHECK: vfkhdb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x04 0x32 0xeb
+
+# CHECK: vfkhdb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x04 0x34 0xeb
+
+# CHECK: vfkhdb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x04 0x38 0xeb
+
+# CHECK: vfkhdb %v18, %v3, %v20
+0xe7 0x23 0x40 0x04 0x3a 0xeb
+
+# CHECK: vfkhdbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x14 0x30 0xeb
+
+# CHECK: vfkhdbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x14 0x32 0xeb
+
+# CHECK: vfkhdbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x14 0x34 0xeb
+
+# CHECK: vfkhdbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x14 0x38 0xeb
+
+# CHECK: vfkhdbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x14 0x3a 0xeb
+
+# CHECK: vfkhsb %v0, %v0, %v0
+0xe7 0x00 0x00 0x04 0x20 0xeb
+
+# CHECK: vfkhsb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x04 0x22 0xeb
+
+# CHECK: vfkhsb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x04 0x24 0xeb
+
+# CHECK: vfkhsb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x04 0x28 0xeb
+
+# CHECK: vfkhsb %v18, %v3, %v20
+0xe7 0x23 0x40 0x04 0x2a 0xeb
+
+# CHECK: vfkhsbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x14 0x20 0xeb
+
+# CHECK: vfkhsbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x14 0x22 0xeb
+
+# CHECK: vfkhsbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x14 0x24 0xeb
+
+# CHECK: vfkhsbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x14 0x28 0xeb
+
+# CHECK: vfkhsbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x14 0x2a 0xeb
+
+# CHECK: vfkhedb %v0, %v0, %v0
+0xe7 0x00 0x00 0x04 0x30 0xea
+
+# CHECK: vfkhedb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x04 0x32 0xea
+
+# CHECK: vfkhedb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x04 0x34 0xea
+
+# CHECK: vfkhedb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x04 0x38 0xea
+
+# CHECK: vfkhedb %v18, %v3, %v20
+0xe7 0x23 0x40 0x04 0x3a 0xea
+
+# CHECK: vfkhedbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x14 0x30 0xea
+
+# CHECK: vfkhedbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x14 0x32 0xea
+
+# CHECK: vfkhedbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x14 0x34 0xea
+
+# CHECK: vfkhedbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x14 0x38 0xea
+
+# CHECK: vfkhedbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x14 0x3a 0xea
+
+# CHECK: vfkhesb %v0, %v0, %v0
+0xe7 0x00 0x00 0x04 0x20 0xea
+
+# CHECK: vfkhesb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x04 0x22 0xea
+
+# CHECK: vfkhesb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x04 0x24 0xea
+
+# CHECK: vfkhesb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x04 0x28 0xea
+
+# CHECK: vfkhesb %v18, %v3, %v20
+0xe7 0x23 0x40 0x04 0x2a 0xea
+
+# CHECK: vfkhesbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x14 0x20 0xea
+
+# CHECK: vfkhesbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x14 0x22 0xea
+
+# CHECK: vfkhesbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x14 0x24 0xea
+
+# CHECK: vfkhesbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x14 0x28 0xea
+
+# CHECK: vfkhesbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x14 0x2a 0xea
+
+# CHECK: vfpsosb %v0, %v0, 3
+0xe7 0x00 0x00 0x30 0x20 0xcc
+
+# CHECK: vfpsosb %v0, %v0, 15
+0xe7 0x00 0x00 0xf0 0x20 0xcc
+
+# CHECK: vfpsosb %v0, %v15, 3
+0xe7 0x0f 0x00 0x30 0x20 0xcc
+
+# CHECK: vfpsosb %v0, %v31, 3
+0xe7 0x0f 0x00 0x30 0x24 0xcc
+
+# CHECK: vfpsosb %v15, %v0, 3
+0xe7 0xf0 0x00 0x30 0x20 0xcc
+
+# CHECK: vfpsosb %v31, %v0, 3
+0xe7 0xf0 0x00 0x30 0x28 0xcc
+
+# CHECK: vfpsosb %v14, %v17, 7
+0xe7 0xe1 0x00 0x70 0x24 0xcc
+
+# CHECK: vflcsb %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xcc
+
+# CHECK: vflcsb %v0, %v15
+0xe7 0x0f 0x00 0x00 0x20 0xcc
+
+# CHECK: vflcsb %v0, %v31
+0xe7 0x0f 0x00 0x00 0x24 0xcc
+
+# CHECK: vflcsb %v15, %v0
+0xe7 0xf0 0x00 0x00 0x20 0xcc
+
+# CHECK: vflcsb %v31, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xcc
+
+# CHECK: vflcsb %v14, %v17
+0xe7 0xe1 0x00 0x00 0x24 0xcc
+
+# CHECK: vflnsb %v0, %v0
+0xe7 0x00 0x00 0x10 0x20 0xcc
+
+# CHECK: vflnsb %v0, %v15
+0xe7 0x0f 0x00 0x10 0x20 0xcc
+
+# CHECK: vflnsb %v0, %v31
+0xe7 0x0f 0x00 0x10 0x24 0xcc
+
+# CHECK: vflnsb %v15, %v0
+0xe7 0xf0 0x00 0x10 0x20 0xcc
+
+# CHECK: vflnsb %v31, %v0
+0xe7 0xf0 0x00 0x10 0x28 0xcc
+
+# CHECK: vflnsb %v14, %v17
+0xe7 0xe1 0x00 0x10 0x24 0xcc
+
+# CHECK: vflpsb %v0, %v0
+0xe7 0x00 0x00 0x20 0x20 0xcc
+
+# CHECK: vflpsb %v0, %v15
+0xe7 0x0f 0x00 0x20 0x20 0xcc
+
+# CHECK: vflpsb %v0, %v31
+0xe7 0x0f 0x00 0x20 0x24 0xcc
+
+# CHECK: vflpsb %v15, %v0
+0xe7 0xf0 0x00 0x20 0x20 0xcc
+
+# CHECK: vflpsb %v31, %v0
+0xe7 0xf0 0x00 0x20 0x28 0xcc
+
+# CHECK: vflpsb %v14, %v17
+0xe7 0xe1 0x00 0x20 0x24 0xcc
+
+# CHECK: vfmax %v0, %v0, %v0, 0, 0, 0
+0xe7 0x00 0x00 0x00 0x00 0xef
+
+# CHECK: vfmax %v0, %v0, %v0, 15, 0, 0
+0xe7 0x00 0x00 0x00 0xf0 0xef
+
+# CHECK: vfmax %v0, %v0, %v0, 0, 15, 0
+0xe7 0x00 0x00 0x0f 0x00 0xef
+
+# CHECK: vfmax %v0, %v0, %v0, 0, 0, 4
+0xe7 0x00 0x00 0x40 0x00 0xef
+
+# CHECK: vfmax %v0, %v0, %v31, 0, 0, 0
+0xe7 0x00 0xf0 0x00 0x02 0xef
+
+# CHECK: vfmax %v0, %v31, %v0, 0, 0, 0
+0xe7 0x0f 0x00 0x00 0x04 0xef
+
+# CHECK: vfmax %v31, %v0, %v0, 0, 0, 0
+0xe7 0xf0 0x00 0x00 0x08 0xef
+
+# CHECK: vfmax %v18, %v3, %v20, 11, 9, 12
+0xe7 0x23 0x40 0xc9 0xba 0xef
+
+# CHECK: vfmaxdb %v0, %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x30 0xef
+
+# CHECK: vfmaxdb %v0, %v0, %v0, 4
+0xe7 0x00 0x00 0x40 0x30 0xef
+
+# CHECK: vfmaxdb %v0, %v0, %v31, 0
+0xe7 0x00 0xf0 0x00 0x32 0xef
+
+# CHECK: vfmaxdb %v0, %v31, %v0, 0
+0xe7 0x0f 0x00 0x00 0x34 0xef
+
+# CHECK: vfmaxdb %v31, %v0, %v0, 0
+0xe7 0xf0 0x00 0x00 0x38 0xef
+
+# CHECK: vfmaxdb %v18, %v3, %v20, 12
+0xe7 0x23 0x40 0xc0 0x3a 0xef
+
+# CHECK: vfmaxsb %v0, %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x20 0xef
+
+# CHECK: vfmaxsb %v0, %v0, %v0, 4
+0xe7 0x00 0x00 0x40 0x20 0xef
+
+# CHECK: vfmaxsb %v0, %v0, %v31, 0
+0xe7 0x00 0xf0 0x00 0x22 0xef
+
+# CHECK: vfmaxsb %v0, %v31, %v0, 0
+0xe7 0x0f 0x00 0x00 0x24 0xef
+
+# CHECK: vfmaxsb %v31, %v0, %v0, 0
+0xe7 0xf0 0x00 0x00 0x28 0xef
+
+# CHECK: vfmaxsb %v18, %v3, %v20, 12
+0xe7 0x23 0x40 0xc0 0x2a 0xef
+
+# CHECK: vfmin %v0, %v0, %v0, 0, 0, 0
+0xe7 0x00 0x00 0x00 0x00 0xee
+
+# CHECK: vfmin %v0, %v0, %v0, 15, 0, 0
+0xe7 0x00 0x00 0x00 0xf0 0xee
+
+# CHECK: vfmin %v0, %v0, %v0, 0, 15, 0
+0xe7 0x00 0x00 0x0f 0x00 0xee
+
+# CHECK: vfmin %v0, %v0, %v0, 0, 0, 4
+0xe7 0x00 0x00 0x40 0x00 0xee
+
+# CHECK: vfmin %v0, %v0, %v31, 0, 0, 0
+0xe7 0x00 0xf0 0x00 0x02 0xee
+
+# CHECK: vfmin %v0, %v31, %v0, 0, 0, 0
+0xe7 0x0f 0x00 0x00 0x04 0xee
+
+# CHECK: vfmin %v31, %v0, %v0, 0, 0, 0
+0xe7 0xf0 0x00 0x00 0x08 0xee
+
+# CHECK: vfmin %v18, %v3, %v20, 11, 9, 12
+0xe7 0x23 0x40 0xc9 0xba 0xee
+
+# CHECK: vfmindb %v0, %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x30 0xee
+
+# CHECK: vfmindb %v0, %v0, %v0, 4
+0xe7 0x00 0x00 0x40 0x30 0xee
+
+# CHECK: vfmindb %v0, %v0, %v31, 0
+0xe7 0x00 0xf0 0x00 0x32 0xee
+
+# CHECK: vfmindb %v0, %v31, %v0, 0
+0xe7 0x0f 0x00 0x00 0x34 0xee
+
+# CHECK: vfmindb %v31, %v0, %v0, 0
+0xe7 0xf0 0x00 0x00 0x38 0xee
+
+# CHECK: vfmindb %v18, %v3, %v20, 12
+0xe7 0x23 0x40 0xc0 0x3a 0xee
+
+# CHECK: vfminsb %v0, %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x20 0xee
+
+# CHECK: vfminsb %v0, %v0, %v0, 4
+0xe7 0x00 0x00 0x40 0x20 0xee
+
+# CHECK: vfminsb %v0, %v0, %v31, 0
+0xe7 0x00 0xf0 0x00 0x22 0xee
+
+# CHECK: vfminsb %v0, %v31, %v0, 0
+0xe7 0x0f 0x00 0x00 0x24 0xee
+
+# CHECK: vfminsb %v31, %v0, %v0, 0
+0xe7 0xf0 0x00 0x00 0x28 0xee
+
+# CHECK: vfminsb %v18, %v3, %v20, 12
+0xe7 0x23 0x40 0xc0 0x2a 0xee
+
+# CHECK: vfmasb %v0, %v0, %v0, %v0
+0xe7 0x00 0x02 0x00 0x00 0x8f
+
+# CHECK: vfmasb %v0, %v0, %v0, %v31
+0xe7 0x00 0x02 0x00 0xf1 0x8f
+
+# CHECK: vfmasb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf2 0x00 0x02 0x8f
+
+# CHECK: vfmasb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x02 0x00 0x04 0x8f
+
+# CHECK: vfmasb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x02 0x00 0x08 0x8f
+
+# CHECK: vfmasb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x00 0x97 0x8f
+
+# CHECK: vfmsb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xe7
+
+# CHECK: vfmsb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xe7
+
+# CHECK: vfmsb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xe7
+
+# CHECK: vfmsb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xe7
+
+# CHECK: vfmsb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xe7
+
+# CHECK: vfmssb %v0, %v0, %v0, %v0
+0xe7 0x00 0x02 0x00 0x00 0x8e
+
+# CHECK: vfmssb %v0, %v0, %v0, %v31
+0xe7 0x00 0x02 0x00 0xf1 0x8e
+
+# CHECK: vfmssb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf2 0x00 0x02 0x8e
+
+# CHECK: vfmssb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x02 0x00 0x04 0x8e
+
+# CHECK: vfmssb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x02 0x00 0x08 0x8e
+
+# CHECK: vfmssb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x00 0x97 0x8e
+
+# CHECK: vfnma %v0, %v0, %v0, %v0, 0, 0
+0xe7 0x00 0x00 0x00 0x00 0x9f
+
+# CHECK: vfnma %v0, %v0, %v0, %v0, 0, 15
+0xe7 0x00 0x0f 0x00 0x00 0x9f
+
+# CHECK: vfnma %v0, %v0, %v0, %v0, 15, 0
+0xe7 0x00 0x00 0x0f 0x00 0x9f
+
+# CHECK: vfnma %v0, %v0, %v0, %v31, 0, 0
+0xe7 0x00 0x00 0x00 0xf1 0x9f
+
+# CHECK: vfnma %v0, %v0, %v31, %v0, 0, 0
+0xe7 0x00 0xf0 0x00 0x02 0x9f
+
+# CHECK: vfnma %v0, %v31, %v0, %v0, 0, 0
+0xe7 0x0f 0x00 0x00 0x04 0x9f
+
+# CHECK: vfnma %v31, %v0, %v0, %v0, 0, 0
+0xe7 0xf0 0x00 0x00 0x08 0x9f
+
+# CHECK: vfnma %v13, %v17, %v21, %v25, 9, 11
+0xe7 0xd1 0x5b 0x09 0x97 0x9f
+
+# CHECK: vfnmadb %v0, %v0, %v0, %v0
+0xe7 0x00 0x03 0x00 0x00 0x9f
+
+# CHECK: vfnmadb %v0, %v0, %v0, %v31
+0xe7 0x00 0x03 0x00 0xf1 0x9f
+
+# CHECK: vfnmadb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf3 0x00 0x02 0x9f
+
+# CHECK: vfnmadb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x03 0x00 0x04 0x9f
+
+# CHECK: vfnmadb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x03 0x00 0x08 0x9f
+
+# CHECK: vfnmadb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x53 0x00 0x97 0x9f
+
+# CHECK: vfnmasb %v0, %v0, %v0, %v0
+0xe7 0x00 0x02 0x00 0x00 0x9f
+
+# CHECK: vfnmasb %v0, %v0, %v0, %v31
+0xe7 0x00 0x02 0x00 0xf1 0x9f
+
+# CHECK: vfnmasb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf2 0x00 0x02 0x9f
+
+# CHECK: vfnmasb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x02 0x00 0x04 0x9f
+
+# CHECK: vfnmasb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x02 0x00 0x08 0x9f
+
+# CHECK: vfnmasb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x00 0x97 0x9f
+
+# CHECK: vfnms %v0, %v0, %v0, %v0, 0, 0
+0xe7 0x00 0x00 0x00 0x00 0x9e
+
+# CHECK: vfnms %v0, %v0, %v0, %v0, 0, 15
+0xe7 0x00 0x0f 0x00 0x00 0x9e
+
+# CHECK: vfnms %v0, %v0, %v0, %v0, 15, 0
+0xe7 0x00 0x00 0x0f 0x00 0x9e
+
+# CHECK: vfnms %v0, %v0, %v0, %v31, 0, 0
+0xe7 0x00 0x00 0x00 0xf1 0x9e
+
+# CHECK: vfnms %v0, %v0, %v31, %v0, 0, 0
+0xe7 0x00 0xf0 0x00 0x02 0x9e
+
+# CHECK: vfnms %v0, %v31, %v0, %v0, 0, 0
+0xe7 0x0f 0x00 0x00 0x04 0x9e
+
+# CHECK: vfnms %v31, %v0, %v0, %v0, 0, 0
+0xe7 0xf0 0x00 0x00 0x08 0x9e
+
+# CHECK: vfnms %v13, %v17, %v21, %v25, 9, 11
+0xe7 0xd1 0x5b 0x09 0x97 0x9e
+
+# CHECK: vfnmsdb %v0, %v0, %v0, %v0
+0xe7 0x00 0x03 0x00 0x00 0x9e
+
+# CHECK: vfnmsdb %v0, %v0, %v0, %v31
+0xe7 0x00 0x03 0x00 0xf1 0x9e
+
+# CHECK: vfnmsdb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf3 0x00 0x02 0x9e
+
+# CHECK: vfnmsdb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x03 0x00 0x04 0x9e
+
+# CHECK: vfnmsdb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x03 0x00 0x08 0x9e
+
+# CHECK: vfnmsdb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x53 0x00 0x97 0x9e
+
+# CHECK: vfnmssb %v0, %v0, %v0, %v0
+0xe7 0x00 0x02 0x00 0x00 0x9e
+
+# CHECK: vfnmssb %v0, %v0, %v0, %v31
+0xe7 0x00 0x02 0x00 0xf1 0x9e
+
+# CHECK: vfnmssb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf2 0x00 0x02 0x9e
+
+# CHECK: vfnmssb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x02 0x00 0x04 0x9e
+
+# CHECK: vfnmssb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x02 0x00 0x08 0x9e
+
+# CHECK: vfnmssb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x00 0x97 0x9e
+
+# CHECK: vfssb %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xe2
+
+# CHECK: vfssb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x22 0xe2
+
+# CHECK: vfssb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x24 0xe2
+
+# CHECK: vfssb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xe2
+
+# CHECK: vfssb %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x2a 0xe2
+
+# CHECK: vfsqsb %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0xce
+
+# CHECK: vfsqsb %v0, %v15
+0xe7 0x0f 0x00 0x00 0x20 0xce
+
+# CHECK: vfsqsb %v0, %v31
+0xe7 0x0f 0x00 0x00 0x24 0xce
+
+# CHECK: vfsqsb %v15, %v0
+0xe7 0xf0 0x00 0x00 0x20 0xce
+
+# CHECK: vfsqsb %v31, %v0
+0xe7 0xf0 0x00 0x00 0x28 0xce
+
+# CHECK: vfsqsb %v14, %v17
+0xe7 0xe1 0x00 0x00 0x24 0xce
+
+# CHECK: vftcisb %v0, %v0, 0
+0xe7 0x00 0x00 0x00 0x20 0x4a
+
+# CHECK: vftcisb %v0, %v0, 4095
+0xe7 0x00 0xff 0xf0 0x20 0x4a
+
+# CHECK: vftcisb %v0, %v15, 0
+0xe7 0x0f 0x00 0x00 0x20 0x4a
+
+# CHECK: vftcisb %v0, %v31, 0
+0xe7 0x0f 0x00 0x00 0x24 0x4a
+
+# CHECK: vftcisb %v15, %v0, 0
+0xe7 0xf0 0x00 0x00 0x20 0x4a
+
+# CHECK: vftcisb %v31, %v0, 0
+0xe7 0xf0 0x00 0x00 0x28 0x4a
+
+# CHECK: vftcisb %v4, %v21, 1656
+0xe7 0x45 0x67 0x80 0x24 0x4a
+
+# CHECK: vlip %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x49
+
+# CHECK: vlip %v0, 0, 15
+0xe6 0x00 0x00 0x00 0xf0 0x49
+
+# CHECK: vlip %v0, 65535, 0
+0xe6 0x00 0xff 0xff 0x00 0x49
+
+# CHECK: vlip %v15, 0, 0
+0xe6 0xf0 0x00 0x00 0x00 0x49
+
+# CHECK: vlip %v31, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x49
+
+# CHECK: vlip %v17, 4660, 7
+0xe6 0x10 0x12 0x34 0x78 0x49
+
+# CHECK: vllezlf %v0, 0
+0xe7 0x00 0x00 0x00 0x60 0x04
+
+# CHECK: vllezlf %v0, 4095
+0xe7 0x00 0x0f 0xff 0x60 0x04
+
+# CHECK: vllezlf %v0, 0(%r15)
+0xe7 0x00 0xf0 0x00 0x60 0x04
+
+# CHECK: vllezlf %v0, 0(%r15,%r1)
+0xe7 0x0f 0x10 0x00 0x60 0x04
+
+# CHECK: vllezlf %v15, 0
+0xe7 0xf0 0x00 0x00 0x60 0x04
+
+# CHECK: vllezlf %v31, 0
+0xe7 0xf0 0x00 0x00 0x68 0x04
+
+# CHECK: vllezlf %v18, 1383(%r3,%r4)
+0xe7 0x23 0x45 0x67 0x68 0x04
+
+# CHECK: vlrl %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x35
+
+# CHECK: vlrl %v0, 4095, 0
+0xe6 0x00 0x0f 0xff 0x00 0x35
+
+# CHECK: vlrl %v0, 0(%r15), 0
+0xe6 0x00 0xf0 0x00 0x00 0x35
+
+# CHECK: vlrl %v0, 0, 255
+0xe6 0xff 0x00 0x00 0x00 0x35
+
+# CHECK: vlrl %v15, 0, 0
+0xe6 0x00 0x00 0x00 0xf0 0x35
+
+# CHECK: vlrl %v31, 0, 0
+0xe6 0x00 0x00 0x00 0xf1 0x35
+
+# CHECK: vlrl %v18, 1383(%r4), 3
+0xe6 0x03 0x45 0x67 0x21 0x35
+
+# CHECK: vlrlr %v0, %r0, 0
+0xe6 0x00 0x00 0x00 0x00 0x37
+
+# CHECK: vlrlr %v0, %r0, 4095
+0xe6 0x00 0x0f 0xff 0x00 0x37
+
+# CHECK: vlrlr %v0, %r0, 0(%r15)
+0xe6 0x00 0xf0 0x00 0x00 0x37
+
+# CHECK: vlrlr %v0, %r15, 0
+0xe6 0x0f 0x00 0x00 0x00 0x37
+
+# CHECK: vlrlr %v15, %r0, 0
+0xe6 0x00 0x00 0x00 0xf0 0x37
+
+# CHECK: vlrlr %v31, %r0, 0
+0xe6 0x00 0x00 0x00 0xf1 0x37
+
+# CHECK: vlrlr %v18, %r3, 1383(%r4)
+0xe6 0x03 0x45 0x67 0x21 0x37
+
+# CHECK: vmsl %v0, %v0, %v0, %v0, 0, 0
+0xe7 0x00 0x00 0x00 0x00 0xb8
+
+# CHECK: vmsl %v0, %v0, %v0, %v0, 15, 0
+0xe7 0x00 0x0f 0x00 0x00 0xb8
+
+# CHECK: vmsl %v0, %v0, %v0, %v0, 0, 12
+0xe7 0x00 0x00 0xc0 0x00 0xb8
+
+# CHECK: vmsl %v0, %v0, %v0, %v15, 0, 0
+0xe7 0x00 0x00 0x00 0xf0 0xb8
+
+# CHECK: vmsl %v0, %v0, %v0, %v31, 0, 0
+0xe7 0x00 0x00 0x00 0xf1 0xb8
+
+# CHECK: vmsl %v0, %v0, %v15, %v0, 0, 0
+0xe7 0x00 0xf0 0x00 0x00 0xb8
+
+# CHECK: vmsl %v0, %v0, %v31, %v0, 0, 0
+0xe7 0x00 0xf0 0x00 0x02 0xb8
+
+# CHECK: vmsl %v0, %v15, %v0, %v0, 0, 0
+0xe7 0x0f 0x00 0x00 0x00 0xb8
+
+# CHECK: vmsl %v0, %v31, %v0, %v0, 0, 0
+0xe7 0x0f 0x00 0x00 0x04 0xb8
+
+# CHECK: vmsl %v15, %v0, %v0, %v0, 0, 0
+0xe7 0xf0 0x00 0x00 0x00 0xb8
+
+# CHECK: vmsl %v31, %v0, %v0, %v0, 0, 0
+0xe7 0xf0 0x00 0x00 0x08 0xb8
+
+# CHECK: vmsl %v18, %v3, %v20, %v5, 0, 4
+0xe7 0x23 0x40 0x40 0x5a 0xb8
+
+# CHECK: vmsl %v18, %v3, %v20, %v5, 11, 8
+0xe7 0x23 0x4b 0x80 0x5a 0xb8
+
+# CHECK: vmslg %v0, %v0, %v0, %v0, 0
+0xe7 0x00 0x03 0x00 0x00 0xb8
+
+# CHECK: vmslg %v0, %v0, %v0, %v0, 12
+0xe7 0x00 0x03 0xc0 0x00 0xb8
+
+# CHECK: vmslg %v0, %v0, %v0, %v15, 0
+0xe7 0x00 0x03 0x00 0xf0 0xb8
+
+# CHECK: vmslg %v0, %v0, %v0, %v31, 0
+0xe7 0x00 0x03 0x00 0xf1 0xb8
+
+# CHECK: vmslg %v0, %v0, %v15, %v0, 0
+0xe7 0x00 0xf3 0x00 0x00 0xb8
+
+# CHECK: vmslg %v0, %v0, %v31, %v0, 0
+0xe7 0x00 0xf3 0x00 0x02 0xb8
+
+# CHECK: vmslg %v0, %v15, %v0, %v0, 0
+0xe7 0x0f 0x03 0x00 0x00 0xb8
+
+# CHECK: vmslg %v0, %v31, %v0, %v0, 0
+0xe7 0x0f 0x03 0x00 0x04 0xb8
+
+# CHECK: vmslg %v15, %v0, %v0, %v0, 0
+0xe7 0xf0 0x03 0x00 0x00 0xb8
+
+# CHECK: vmslg %v31, %v0, %v0, %v0, 0
+0xe7 0xf0 0x03 0x00 0x08 0xb8
+
+# CHECK: vmslg %v18, %v3, %v20, %v5, 4
+0xe7 0x23 0x43 0x40 0x5a 0xb8
+
+# CHECK: vmslg %v18, %v3, %v20, %v5, 8
+0xe7 0x23 0x43 0x80 0x5a 0xb8
+
+# CHECK: vmp %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x78
+
+# CHECK: vmp %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x78
+
+# CHECK: vmp %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x78
+
+# CHECK: vmp %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x78
+
+# CHECK: vmp %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x78
+
+# CHECK: vmp %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x78
+
+# CHECK: vmp %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x78
+
+# CHECK: vmsp %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x79
+
+# CHECK: vmsp %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x79
+
+# CHECK: vmsp %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x79
+
+# CHECK: vmsp %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x79
+
+# CHECK: vmsp %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x79
+
+# CHECK: vmsp %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x79
+
+# CHECK: vmsp %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x79
+
+# CHECK: vnn %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x6e
+
+# CHECK: vnn %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x02 0x6e
+
+# CHECK: vnn %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x04 0x6e
+
+# CHECK: vnn %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x08 0x6e
+
+# CHECK: vnn %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x6e
+
+# CHECK: vnx %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x6c
+
+# CHECK: vnx %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x02 0x6c
+
+# CHECK: vnx %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x04 0x6c
+
+# CHECK: vnx %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x08 0x6c
+
+# CHECK: vnx %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x6c
+
+# CHECK: voc %v0, %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x6f
+
+# CHECK: voc %v0, %v0, %v31
+0xe7 0x00 0xf0 0x00 0x02 0x6f
+
+# CHECK: voc %v0, %v31, %v0
+0xe7 0x0f 0x00 0x00 0x04 0x6f
+
+# CHECK: voc %v31, %v0, %v0
+0xe7 0xf0 0x00 0x00 0x08 0x6f
+
+# CHECK: voc %v18, %v3, %v20
+0xe7 0x23 0x40 0x00 0x0a 0x6f
+
+# CHECK: vpkz %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x34
+
+# CHECK: vpkz %v0, 4095, 0
+0xe6 0x00 0x0f 0xff 0x00 0x34
+
+# CHECK: vpkz %v0, 0(%r15), 0
+0xe6 0x00 0xf0 0x00 0x00 0x34
+
+# CHECK: vpkz %v0, 0, 255
+0xe6 0xff 0x00 0x00 0x00 0x34
+
+# CHECK: vpkz %v15, 0, 0
+0xe6 0x00 0x00 0x00 0xf0 0x34
+
+# CHECK: vpkz %v31, 0, 0
+0xe6 0x00 0x00 0x00 0xf1 0x34
+
+# CHECK: vpkz %v18, 1383(%r4), 3
+0xe6 0x03 0x45 0x67 0x21 0x34
+
+# CHECK: vpopctb %v0, %v0
+0xe7 0x00 0x00 0x00 0x00 0x50
+
+# CHECK: vpopctb %v0, %v15
+0xe7 0x0f 0x00 0x00 0x00 0x50
+
+# CHECK: vpopctb %v0, %v31
+0xe7 0x0f 0x00 0x00 0x04 0x50
+
+# CHECK: vpopctb %v15, %v0
+0xe7 0xf0 0x00 0x00 0x00 0x50
+
+# CHECK: vpopctb %v31, %v0
+0xe7 0xf0 0x00 0x00 0x08 0x50
+
+# CHECK: vpopctb %v14, %v17
+0xe7 0xe1 0x00 0x00 0x04 0x50
+
+# CHECK: vpopctf %v0, %v0
+0xe7 0x00 0x00 0x00 0x20 0x50
+
+# CHECK: vpopctf %v0, %v15
+0xe7 0x0f 0x00 0x00 0x20 0x50
+
+# CHECK: vpopctf %v0, %v31
+0xe7 0x0f 0x00 0x00 0x24 0x50
+
+# CHECK: vpopctf %v15, %v0
+0xe7 0xf0 0x00 0x00 0x20 0x50
+
+# CHECK: vpopctf %v31, %v0
+0xe7 0xf0 0x00 0x00 0x28 0x50
+
+# CHECK: vpopctf %v14, %v17
+0xe7 0xe1 0x00 0x00 0x24 0x50
+
+# CHECK: vpopctg %v0, %v0
+0xe7 0x00 0x00 0x00 0x30 0x50
+
+# CHECK: vpopctg %v0, %v15
+0xe7 0x0f 0x00 0x00 0x30 0x50
+
+# CHECK: vpopctg %v0, %v31
+0xe7 0x0f 0x00 0x00 0x34 0x50
+
+# CHECK: vpopctg %v15, %v0
+0xe7 0xf0 0x00 0x00 0x30 0x50
+
+# CHECK: vpopctg %v31, %v0
+0xe7 0xf0 0x00 0x00 0x38 0x50
+
+# CHECK: vpopctg %v14, %v17
+0xe7 0xe1 0x00 0x00 0x34 0x50
+
+# CHECK: vpopcth %v0, %v0
+0xe7 0x00 0x00 0x00 0x10 0x50
+
+# CHECK: vpopcth %v0, %v15
+0xe7 0x0f 0x00 0x00 0x10 0x50
+
+# CHECK: vpopcth %v0, %v31
+0xe7 0x0f 0x00 0x00 0x14 0x50
+
+# CHECK: vpopcth %v15, %v0
+0xe7 0xf0 0x00 0x00 0x10 0x50
+
+# CHECK: vpopcth %v31, %v0
+0xe7 0xf0 0x00 0x00 0x18 0x50
+
+# CHECK: vpopcth %v14, %v17
+0xe7 0xe1 0x00 0x00 0x14 0x50
+
+# CHECK: vpsop %v0, %v0, 0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x5b
+
+# CHECK: vpsop %v0, %v0, 0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x5b
+
+# CHECK: vpsop %v0, %v0, 0, 255, 0
+0xe6 0x00 0xff 0x00 0x00 0x5b
+
+# CHECK: vpsop %v0, %v0, 255, 0, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x5b
+
+# CHECK: vpsop %v0, %v31, 0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x5b
+
+# CHECK: vpsop %v31, %v0, 0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x5b
+
+# CHECK: vpsop %v13, %v17, 52, 121, 11
+0xe6 0xd1 0x79 0xb3 0x44 0x5b
+
+# CHECK: vrp %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x7b
+
+# CHECK: vrp %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x7b
+
+# CHECK: vrp %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x7b
+
+# CHECK: vrp %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x7b
+
+# CHECK: vrp %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x7b
+
+# CHECK: vrp %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x7b
+
+# CHECK: vrp %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x7b
+
+# CHECK: vsdp %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x7e
+
+# CHECK: vsdp %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x7e
+
+# CHECK: vsdp %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x7e
+
+# CHECK: vsdp %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x7e
+
+# CHECK: vsdp %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x7e
+
+# CHECK: vsdp %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x7e
+
+# CHECK: vsdp %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x7e
+
+# CHECK: vsp %v0, %v0, %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x73
+
+# CHECK: vsp %v0, %v0, %v0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x73
+
+# CHECK: vsp %v0, %v0, %v0, 255, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x73
+
+# CHECK: vsp %v0, %v0, %v31, 0, 0
+0xe6 0x00 0xf0 0x00 0x02 0x73
+
+# CHECK: vsp %v0, %v31, %v0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x73
+
+# CHECK: vsp %v31, %v0, %v0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x73
+
+# CHECK: vsp %v13, %v17, %v21, 121, 11
+0xe6 0xd1 0x50 0xb7 0x96 0x73
+
+# CHECK: vsrp %v0, %v0, 0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x59
+
+# CHECK: vsrp %v0, %v0, 0, 0, 15
+0xe6 0x00 0x00 0xf0 0x00 0x59
+
+# CHECK: vsrp %v0, %v0, 0, 255, 0
+0xe6 0x00 0xff 0x00 0x00 0x59
+
+# CHECK: vsrp %v0, %v0, 255, 0, 0
+0xe6 0x00 0x00 0x0f 0xf0 0x59
+
+# CHECK: vsrp %v0, %v31, 0, 0, 0
+0xe6 0x0f 0x00 0x00 0x04 0x59
+
+# CHECK: vsrp %v31, %v0, 0, 0, 0
+0xe6 0xf0 0x00 0x00 0x08 0x59
+
+# CHECK: vsrp %v13, %v17, 52, 121, 11
+0xe6 0xd1 0x79 0xb3 0x44 0x59
+
+# CHECK: vstrl %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x3d
+
+# CHECK: vstrl %v0, 4095, 0
+0xe6 0x00 0x0f 0xff 0x00 0x3d
+
+# CHECK: vstrl %v0, 0(%r15), 0
+0xe6 0x00 0xf0 0x00 0x00 0x3d
+
+# CHECK: vstrl %v0, 0, 255
+0xe6 0xff 0x00 0x00 0x00 0x3d
+
+# CHECK: vstrl %v15, 0, 0
+0xe6 0x00 0x00 0x00 0xf0 0x3d
+
+# CHECK: vstrl %v31, 0, 0
+0xe6 0x00 0x00 0x00 0xf1 0x3d
+
+# CHECK: vstrl %v18, 1383(%r4), 3
+0xe6 0x03 0x45 0x67 0x21 0x3d
+
+# CHECK: vstrlr %v0, %r0, 0
+0xe6 0x00 0x00 0x00 0x00 0x3f
+
+# CHECK: vstrlr %v0, %r0, 4095
+0xe6 0x00 0x0f 0xff 0x00 0x3f
+
+# CHECK: vstrlr %v0, %r0, 0(%r15)
+0xe6 0x00 0xf0 0x00 0x00 0x3f
+
+# CHECK: vstrlr %v0, %r15, 0
+0xe6 0x0f 0x00 0x00 0x00 0x3f
+
+# CHECK: vstrlr %v15, %r0, 0
+0xe6 0x00 0x00 0x00 0xf0 0x3f
+
+# CHECK: vstrlr %v31, %r0, 0
+0xe6 0x00 0x00 0x00 0xf1 0x3f
+
+# CHECK: vstrlr %v18, %r3, 1383(%r4)
+0xe6 0x03 0x45 0x67 0x21 0x3f
+
+# CHECK: vtp %v0
+0xe6 0x00 0x00 0x00 0x00 0x5f
+
+# CHECK: vtp %v15
+0xe6 0x0f 0x00 0x00 0x00 0x5f
+
+# CHECK: vtp %v31
+0xe6 0x0f 0x00 0x00 0x04 0x5f
+
+# CHECK: vupkz %v0, 0, 0
+0xe6 0x00 0x00 0x00 0x00 0x3c
+
+# CHECK: vupkz %v0, 4095, 0
+0xe6 0x00 0x0f 0xff 0x00 0x3c
+
+# CHECK: vupkz %v0, 0(%r15), 0
+0xe6 0x00 0xf0 0x00 0x00 0x3c
+
+# CHECK: vupkz %v0, 0, 255
+0xe6 0xff 0x00 0x00 0x00 0x3c
+
+# CHECK: vupkz %v15, 0, 0
+0xe6 0x00 0x00 0x00 0xf0 0x3c
+
+# CHECK: vupkz %v31, 0, 0
+0xe6 0x00 0x00 0x00 0xf1 0x3c
+
+# CHECK: vupkz %v18, 1383(%r4), 3
+0xe6 0x03 0x45 0x67 0x21 0x3c
+
+# CHECK: wfasb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe3
+
+# CHECK: wfasb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe3
+
+# CHECK: wfasb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xe3
+
+# CHECK: wfasb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xe3
+
+# CHECK: wfasb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xe3
+
+# CHECK: wfasb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xe3
+
+# CHECK: wfaxb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xe3
+
+# CHECK: wfaxb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xe3
+
+# CHECK: wfaxb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xe3
+
+# CHECK: wfaxb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xe3
+
+# CHECK: wfaxb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xe3
+
+# CHECK: wfcsb %f0, %f0
+0xe7 0x00 0x00 0x00 0x20 0xcb
+
+# CHECK: wfcsb %f0, %f0
+0xe7 0x00 0x00 0x00 0x20 0xcb
+
+# CHECK: wfcsb %f0, %f15
+0xe7 0x0f 0x00 0x00 0x20 0xcb
+
+# CHECK: wfcsb %f0, %v31
+0xe7 0x0f 0x00 0x00 0x24 0xcb
+
+# CHECK: wfcsb %f15, %f0
+0xe7 0xf0 0x00 0x00 0x20 0xcb
+
+# CHECK: wfcsb %v31, %f0
+0xe7 0xf0 0x00 0x00 0x28 0xcb
+
+# CHECK: wfcsb %f14, %v17
+0xe7 0xe1 0x00 0x00 0x24 0xcb
+
+# CHECK: wfcxb %v0, %v0
+0xe7 0x00 0x00 0x00 0x40 0xcb
+
+# CHECK: wfcxb %v0, %v15
+0xe7 0x0f 0x00 0x00 0x40 0xcb
+
+# CHECK: wfcxb %v0, %v31
+0xe7 0x0f 0x00 0x00 0x44 0xcb
+
+# CHECK: wfcxb %v15, %v0
+0xe7 0xf0 0x00 0x00 0x40 0xcb
+
+# CHECK: wfcxb %v31, %v0
+0xe7 0xf0 0x00 0x00 0x48 0xcb
+
+# CHECK: wfcxb %v14, %v17
+0xe7 0xe1 0x00 0x00 0x44 0xcb
+
+# CHECK: wfcesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe8
+
+# CHECK: wfcesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe8
+
+# CHECK: wfcesb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xe8
+
+# CHECK: wfcesb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xe8
+
+# CHECK: wfcesb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xe8
+
+# CHECK: wfcesb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xe8
+
+# CHECK: wfcesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xe8
+
+# CHECK: wfcesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xe8
+
+# CHECK: wfcesbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x18 0x22 0xe8
+
+# CHECK: wfcesbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x18 0x24 0xe8
+
+# CHECK: wfcesbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x18 0x28 0xe8
+
+# CHECK: wfcesbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x18 0x2a 0xe8
+
+# CHECK: wfcexb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xe8
+
+# CHECK: wfcexb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xe8
+
+# CHECK: wfcexb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xe8
+
+# CHECK: wfcexb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xe8
+
+# CHECK: wfcexb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xe8
+
+# CHECK: wfcexbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x18 0x40 0xe8
+
+# CHECK: wfcexbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x18 0x42 0xe8
+
+# CHECK: wfcexbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x18 0x44 0xe8
+
+# CHECK: wfcexbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x18 0x48 0xe8
+
+# CHECK: wfcexbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x18 0x4a 0xe8
+
+# CHECK: wfchsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xeb
+
+# CHECK: wfchsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xeb
+
+# CHECK: wfchsb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xeb
+
+# CHECK: wfchsb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xeb
+
+# CHECK: wfchsb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xeb
+
+# CHECK: wfchsb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xeb
+
+# CHECK: wfchsbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xeb
+
+# CHECK: wfchsbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xeb
+
+# CHECK: wfchsbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x18 0x22 0xeb
+
+# CHECK: wfchsbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x18 0x24 0xeb
+
+# CHECK: wfchsbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x18 0x28 0xeb
+
+# CHECK: wfchsbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x18 0x2a 0xeb
+
+# CHECK: wfchxb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xeb
+
+# CHECK: wfchxb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xeb
+
+# CHECK: wfchxb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xeb
+
+# CHECK: wfchxb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xeb
+
+# CHECK: wfchxb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xeb
+
+# CHECK: wfchxbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x18 0x40 0xeb
+
+# CHECK: wfchxbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x18 0x42 0xeb
+
+# CHECK: wfchxbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x18 0x44 0xeb
+
+# CHECK: wfchxbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x18 0x48 0xeb
+
+# CHECK: wfchxbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x18 0x4a 0xeb
+
+# CHECK: wfchesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xea
+
+# CHECK: wfchesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xea
+
+# CHECK: wfchesb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xea
+
+# CHECK: wfchesb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xea
+
+# CHECK: wfchesb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xea
+
+# CHECK: wfchesb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xea
+
+# CHECK: wfchesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xea
+
+# CHECK: wfchesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xea
+
+# CHECK: wfchesbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x18 0x22 0xea
+
+# CHECK: wfchesbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x18 0x24 0xea
+
+# CHECK: wfchesbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x18 0x28 0xea
+
+# CHECK: wfchesbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x18 0x2a 0xea
+
+# CHECK: wfchexb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xea
+
+# CHECK: wfchexb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xea
+
+# CHECK: wfchexb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xea
+
+# CHECK: wfchexb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xea
+
+# CHECK: wfchexb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xea
+
+# CHECK: wfchexbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x18 0x40 0xea
+
+# CHECK: wfchexbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x18 0x42 0xea
+
+# CHECK: wfchexbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x18 0x44 0xea
+
+# CHECK: wfchexbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x18 0x48 0xea
+
+# CHECK: wfchexbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x18 0x4a 0xea
+
+# CHECK: wfdsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe5
+
+# CHECK: wfdsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe5
+
+# CHECK: wfdsb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xe5
+
+# CHECK: wfdsb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xe5
+
+# CHECK: wfdsb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xe5
+
+# CHECK: wfdsb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xe5
+
+# CHECK: wfdxb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xe5
+
+# CHECK: wfdxb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xe5
+
+# CHECK: wfdxb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xe5
+
+# CHECK: wfdxb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xe5
+
+# CHECK: wfdxb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xe5
+
+# CHECK: wfisb %f0, %f0, 0, 0
+0xe7 0x00 0x00 0x08 0x20 0xc7
+
+# CHECK: wfisb %f0, %f0, 0, 0
+0xe7 0x00 0x00 0x08 0x20 0xc7
+
+# CHECK: wfisb %f0, %f0, 0, 15
+0xe7 0x00 0x00 0xf8 0x20 0xc7
+
+# CHECK: wfisb %f0, %f0, 4, 0
+0xe7 0x00 0x00 0x0c 0x20 0xc7
+
+# CHECK: wfisb %f0, %f0, 7, 0
+0xe7 0x00 0x00 0x0f 0x20 0xc7
+
+# CHECK: wfisb %f0, %v31, 0, 0
+0xe7 0x0f 0x00 0x08 0x24 0xc7
+
+# CHECK: wfisb %v31, %f0, 0, 0
+0xe7 0xf0 0x00 0x08 0x28 0xc7
+
+# CHECK: wfisb %f14, %v17, 4, 10
+0xe7 0xe1 0x00 0xac 0x24 0xc7
+
+# CHECK: wfixb %v0, %v0, 0, 0
+0xe7 0x00 0x00 0x08 0x40 0xc7
+
+# CHECK: wfixb %v0, %v0, 0, 15
+0xe7 0x00 0x00 0xf8 0x40 0xc7
+
+# CHECK: wfixb %v0, %v0, 4, 0
+0xe7 0x00 0x00 0x0c 0x40 0xc7
+
+# CHECK: wfixb %v0, %v0, 7, 0
+0xe7 0x00 0x00 0x0f 0x40 0xc7
+
+# CHECK: wfixb %v0, %v31, 0, 0
+0xe7 0x0f 0x00 0x08 0x44 0xc7
+
+# CHECK: wfixb %v31, %v0, 0, 0
+0xe7 0xf0 0x00 0x08 0x48 0xc7
+
+# CHECK: wfixb %v14, %v17, 4, 10
+0xe7 0xe1 0x00 0xac 0x44 0xc7
+
+# CHECK: wfksb %f0, %f0
+0xe7 0x00 0x00 0x00 0x20 0xca
+
+# CHECK: wfksb %f0, %f0
+0xe7 0x00 0x00 0x00 0x20 0xca
+
+# CHECK: wfksb %f0, %f15
+0xe7 0x0f 0x00 0x00 0x20 0xca
+
+# CHECK: wfksb %f0, %v31
+0xe7 0x0f 0x00 0x00 0x24 0xca
+
+# CHECK: wfksb %f15, %f0
+0xe7 0xf0 0x00 0x00 0x20 0xca
+
+# CHECK: wfksb %v31, %f0
+0xe7 0xf0 0x00 0x00 0x28 0xca
+
+# CHECK: wfksb %f14, %v17
+0xe7 0xe1 0x00 0x00 0x24 0xca
+
+# CHECK: wfkxb %v0, %v0
+0xe7 0x00 0x00 0x00 0x40 0xca
+
+# CHECK: wfkxb %v0, %v15
+0xe7 0x0f 0x00 0x00 0x40 0xca
+
+# CHECK: wfkxb %v0, %v31
+0xe7 0x0f 0x00 0x00 0x44 0xca
+
+# CHECK: wfkxb %v15, %v0
+0xe7 0xf0 0x00 0x00 0x40 0xca
+
+# CHECK: wfkxb %v31, %v0
+0xe7 0xf0 0x00 0x00 0x48 0xca
+
+# CHECK: wfkxb %v14, %v17
+0xe7 0xe1 0x00 0x00 0x44 0xca
+
+# CHECK: wfkedb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x30 0xe8
+
+# CHECK: wfkedb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x30 0xe8
+
+# CHECK: wfkedb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x0c 0x32 0xe8
+
+# CHECK: wfkedb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x0c 0x34 0xe8
+
+# CHECK: wfkedb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x0c 0x38 0xe8
+
+# CHECK: wfkedb %v18, %f3, %v20
+0xe7 0x23 0x40 0x0c 0x3a 0xe8
+
+# CHECK: wfkedbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x30 0xe8
+
+# CHECK: wfkedbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x30 0xe8
+
+# CHECK: wfkedbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x1c 0x32 0xe8
+
+# CHECK: wfkedbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x1c 0x34 0xe8
+
+# CHECK: wfkedbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x1c 0x38 0xe8
+
+# CHECK: wfkedbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x1c 0x3a 0xe8
+
+# CHECK: wfkesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x20 0xe8
+
+# CHECK: wfkesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x20 0xe8
+
+# CHECK: wfkesb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x0c 0x22 0xe8
+
+# CHECK: wfkesb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x0c 0x24 0xe8
+
+# CHECK: wfkesb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x0c 0x28 0xe8
+
+# CHECK: wfkesb %v18, %f3, %v20
+0xe7 0x23 0x40 0x0c 0x2a 0xe8
+
+# CHECK: wfkesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x20 0xe8
+
+# CHECK: wfkesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x20 0xe8
+
+# CHECK: wfkesbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x1c 0x22 0xe8
+
+# CHECK: wfkesbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x1c 0x24 0xe8
+
+# CHECK: wfkesbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x1c 0x28 0xe8
+
+# CHECK: wfkesbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x1c 0x2a 0xe8
+
+# CHECK: wfkexb %v0, %v0, %v0
+0xe7 0x00 0x00 0x0c 0x40 0xe8
+
+# CHECK: wfkexb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x0c 0x42 0xe8
+
+# CHECK: wfkexb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x0c 0x44 0xe8
+
+# CHECK: wfkexb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x0c 0x48 0xe8
+
+# CHECK: wfkexb %v18, %v3, %v20
+0xe7 0x23 0x40 0x0c 0x4a 0xe8
+
+# CHECK: wfkexbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x1c 0x40 0xe8
+
+# CHECK: wfkexbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x1c 0x42 0xe8
+
+# CHECK: wfkexbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x1c 0x44 0xe8
+
+# CHECK: wfkexbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x1c 0x48 0xe8
+
+# CHECK: wfkexbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x1c 0x4a 0xe8
+
+# CHECK: wfkhdb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x30 0xeb
+
+# CHECK: wfkhdb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x30 0xeb
+
+# CHECK: wfkhdb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x0c 0x32 0xeb
+
+# CHECK: wfkhdb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x0c 0x34 0xeb
+
+# CHECK: wfkhdb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x0c 0x38 0xeb
+
+# CHECK: wfkhdb %v18, %f3, %v20
+0xe7 0x23 0x40 0x0c 0x3a 0xeb
+
+# CHECK: wfkhdbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x30 0xeb
+
+# CHECK: wfkhdbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x30 0xeb
+
+# CHECK: wfkhdbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x1c 0x32 0xeb
+
+# CHECK: wfkhdbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x1c 0x34 0xeb
+
+# CHECK: wfkhdbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x1c 0x38 0xeb
+
+# CHECK: wfkhdbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x1c 0x3a 0xeb
+
+# CHECK: wfkhsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x20 0xeb
+
+# CHECK: wfkhsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x20 0xeb
+
+# CHECK: wfkhsb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x0c 0x22 0xeb
+
+# CHECK: wfkhsb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x0c 0x24 0xeb
+
+# CHECK: wfkhsb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x0c 0x28 0xeb
+
+# CHECK: wfkhsb %v18, %f3, %v20
+0xe7 0x23 0x40 0x0c 0x2a 0xeb
+
+# CHECK: wfkhsbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x20 0xeb
+
+# CHECK: wfkhsbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x20 0xeb
+
+# CHECK: wfkhsbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x1c 0x22 0xeb
+
+# CHECK: wfkhsbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x1c 0x24 0xeb
+
+# CHECK: wfkhsbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x1c 0x28 0xeb
+
+# CHECK: wfkhsbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x1c 0x2a 0xeb
+
+# CHECK: wfkhxb %v0, %v0, %v0
+0xe7 0x00 0x00 0x0c 0x40 0xeb
+
+# CHECK: wfkhxb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x0c 0x42 0xeb
+
+# CHECK: wfkhxb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x0c 0x44 0xeb
+
+# CHECK: wfkhxb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x0c 0x48 0xeb
+
+# CHECK: wfkhxb %v18, %v3, %v20
+0xe7 0x23 0x40 0x0c 0x4a 0xeb
+
+# CHECK: wfkhxbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x1c 0x40 0xeb
+
+# CHECK: wfkhxbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x1c 0x42 0xeb
+
+# CHECK: wfkhxbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x1c 0x44 0xeb
+
+# CHECK: wfkhxbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x1c 0x48 0xeb
+
+# CHECK: wfkhxbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x1c 0x4a 0xeb
+
+# CHECK: wfkhedb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x30 0xea
+
+# CHECK: wfkhedb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x30 0xea
+
+# CHECK: wfkhedb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x0c 0x32 0xea
+
+# CHECK: wfkhedb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x0c 0x34 0xea
+
+# CHECK: wfkhedb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x0c 0x38 0xea
+
+# CHECK: wfkhedb %v18, %f3, %v20
+0xe7 0x23 0x40 0x0c 0x3a 0xea
+
+# CHECK: wfkhedbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x30 0xea
+
+# CHECK: wfkhedbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x30 0xea
+
+# CHECK: wfkhedbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x1c 0x32 0xea
+
+# CHECK: wfkhedbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x1c 0x34 0xea
+
+# CHECK: wfkhedbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x1c 0x38 0xea
+
+# CHECK: wfkhedbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x1c 0x3a 0xea
+
+# CHECK: wfkhesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x20 0xea
+
+# CHECK: wfkhesb %f0, %f0, %f0
+0xe7 0x00 0x00 0x0c 0x20 0xea
+
+# CHECK: wfkhesb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x0c 0x22 0xea
+
+# CHECK: wfkhesb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x0c 0x24 0xea
+
+# CHECK: wfkhesb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x0c 0x28 0xea
+
+# CHECK: wfkhesb %v18, %f3, %v20
+0xe7 0x23 0x40 0x0c 0x2a 0xea
+
+# CHECK: wfkhesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x20 0xea
+
+# CHECK: wfkhesbs %f0, %f0, %f0
+0xe7 0x00 0x00 0x1c 0x20 0xea
+
+# CHECK: wfkhesbs %f0, %f0, %v31
+0xe7 0x00 0xf0 0x1c 0x22 0xea
+
+# CHECK: wfkhesbs %f0, %v31, %f0
+0xe7 0x0f 0x00 0x1c 0x24 0xea
+
+# CHECK: wfkhesbs %v31, %f0, %f0
+0xe7 0xf0 0x00 0x1c 0x28 0xea
+
+# CHECK: wfkhesbs %v18, %f3, %v20
+0xe7 0x23 0x40 0x1c 0x2a 0xea
+
+# CHECK: wfkhexb %v0, %v0, %v0
+0xe7 0x00 0x00 0x0c 0x40 0xea
+
+# CHECK: wfkhexb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x0c 0x42 0xea
+
+# CHECK: wfkhexb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x0c 0x44 0xea
+
+# CHECK: wfkhexb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x0c 0x48 0xea
+
+# CHECK: wfkhexb %v18, %v3, %v20
+0xe7 0x23 0x40 0x0c 0x4a 0xea
+
+# CHECK: wfkhexbs %v0, %v0, %v0
+0xe7 0x00 0x00 0x1c 0x40 0xea
+
+# CHECK: wfkhexbs %v0, %v0, %v31
+0xe7 0x00 0xf0 0x1c 0x42 0xea
+
+# CHECK: wfkhexbs %v0, %v31, %v0
+0xe7 0x0f 0x00 0x1c 0x44 0xea
+
+# CHECK: wfkhexbs %v31, %v0, %v0
+0xe7 0xf0 0x00 0x1c 0x48 0xea
+
+# CHECK: wfkhexbs %v18, %v3, %v20
+0xe7 0x23 0x40 0x1c 0x4a 0xea
+
+# CHECK: wfpsosb %f0, %f0, 3
+0xe7 0x00 0x00 0x38 0x20 0xcc
+
+# CHECK: wfpsosb %f0, %f0, 3
+0xe7 0x00 0x00 0x38 0x20 0xcc
+
+# CHECK: wfpsosb %f0, %f0, 15
+0xe7 0x00 0x00 0xf8 0x20 0xcc
+
+# CHECK: wfpsosb %f0, %f15, 3
+0xe7 0x0f 0x00 0x38 0x20 0xcc
+
+# CHECK: wfpsosb %f0, %v31, 3
+0xe7 0x0f 0x00 0x38 0x24 0xcc
+
+# CHECK: wfpsosb %f15, %f0, 3
+0xe7 0xf0 0x00 0x38 0x20 0xcc
+
+# CHECK: wfpsosb %v31, %f0, 3
+0xe7 0xf0 0x00 0x38 0x28 0xcc
+
+# CHECK: wfpsosb %f14, %v17, 7
+0xe7 0xe1 0x00 0x78 0x24 0xcc
+
+# CHECK: wfpsoxb %v0, %v0, 3
+0xe7 0x00 0x00 0x38 0x40 0xcc
+
+# CHECK: wfpsoxb %v0, %v0, 15
+0xe7 0x00 0x00 0xf8 0x40 0xcc
+
+# CHECK: wfpsoxb %v0, %v15, 3
+0xe7 0x0f 0x00 0x38 0x40 0xcc
+
+# CHECK: wfpsoxb %v0, %v31, 3
+0xe7 0x0f 0x00 0x38 0x44 0xcc
+
+# CHECK: wfpsoxb %v15, %v0, 3
+0xe7 0xf0 0x00 0x38 0x40 0xcc
+
+# CHECK: wfpsoxb %v31, %v0, 3
+0xe7 0xf0 0x00 0x38 0x48 0xcc
+
+# CHECK: wfpsoxb %v14, %v17, 7
+0xe7 0xe1 0x00 0x78 0x44 0xcc
+
+# CHECK: wflcsb %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xcc
+
+# CHECK: wflcsb %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xcc
+
+# CHECK: wflcsb %f0, %f15
+0xe7 0x0f 0x00 0x08 0x20 0xcc
+
+# CHECK: wflcsb %f0, %v31
+0xe7 0x0f 0x00 0x08 0x24 0xcc
+
+# CHECK: wflcsb %f15, %f0
+0xe7 0xf0 0x00 0x08 0x20 0xcc
+
+# CHECK: wflcsb %v31, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xcc
+
+# CHECK: wflcsb %f14, %v17
+0xe7 0xe1 0x00 0x08 0x24 0xcc
+
+# CHECK: wflcxb %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xcc
+
+# CHECK: wflcxb %v0, %v15
+0xe7 0x0f 0x00 0x08 0x40 0xcc
+
+# CHECK: wflcxb %v0, %v31
+0xe7 0x0f 0x00 0x08 0x44 0xcc
+
+# CHECK: wflcxb %v15, %v0
+0xe7 0xf0 0x00 0x08 0x40 0xcc
+
+# CHECK: wflcxb %v31, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xcc
+
+# CHECK: wflcxb %v14, %v17
+0xe7 0xe1 0x00 0x08 0x44 0xcc
+
+# CHECK: wflnsb %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xcc
+
+# CHECK: wflnsb %f0, %f0
+0xe7 0x00 0x00 0x18 0x20 0xcc
+
+# CHECK: wflnsb %f0, %f15
+0xe7 0x0f 0x00 0x18 0x20 0xcc
+
+# CHECK: wflnsb %f0, %v31
+0xe7 0x0f 0x00 0x18 0x24 0xcc
+
+# CHECK: wflnsb %f15, %f0
+0xe7 0xf0 0x00 0x18 0x20 0xcc
+
+# CHECK: wflnsb %v31, %f0
+0xe7 0xf0 0x00 0x18 0x28 0xcc
+
+# CHECK: wflnsb %f14, %v17
+0xe7 0xe1 0x00 0x18 0x24 0xcc
+
+# CHECK: wflnxb %v0, %v0
+0xe7 0x00 0x00 0x18 0x40 0xcc
+
+# CHECK: wflnxb %v0, %v15
+0xe7 0x0f 0x00 0x18 0x40 0xcc
+
+# CHECK: wflnxb %v0, %v31
+0xe7 0x0f 0x00 0x18 0x44 0xcc
+
+# CHECK: wflnxb %v15, %v0
+0xe7 0xf0 0x00 0x18 0x40 0xcc
+
+# CHECK: wflnxb %v31, %v0
+0xe7 0xf0 0x00 0x18 0x48 0xcc
+
+# CHECK: wflnxb %v14, %v17
+0xe7 0xe1 0x00 0x18 0x44 0xcc
+
+# CHECK: wflpsb %f0, %f0
+0xe7 0x00 0x00 0x28 0x20 0xcc
+
+# CHECK: wflpsb %f0, %f0
+0xe7 0x00 0x00 0x28 0x20 0xcc
+
+# CHECK: wflpsb %f0, %f15
+0xe7 0x0f 0x00 0x28 0x20 0xcc
+
+# CHECK: wflpsb %f0, %v31
+0xe7 0x0f 0x00 0x28 0x24 0xcc
+
+# CHECK: wflpsb %f15, %f0
+0xe7 0xf0 0x00 0x28 0x20 0xcc
+
+# CHECK: wflpsb %v31, %f0
+0xe7 0xf0 0x00 0x28 0x28 0xcc
+
+# CHECK: wflpsb %f14, %v17
+0xe7 0xe1 0x00 0x28 0x24 0xcc
+
+# CHECK: wflpxb %v0, %v0
+0xe7 0x00 0x00 0x28 0x40 0xcc
+
+# CHECK: wflpxb %v0, %v15
+0xe7 0x0f 0x00 0x28 0x40 0xcc
+
+# CHECK: wflpxb %v0, %v31
+0xe7 0x0f 0x00 0x28 0x44 0xcc
+
+# CHECK: wflpxb %v15, %v0
+0xe7 0xf0 0x00 0x28 0x40 0xcc
+
+# CHECK: wflpxb %v31, %v0
+0xe7 0xf0 0x00 0x28 0x48 0xcc
+
+# CHECK: wflpxb %v14, %v17
+0xe7 0xe1 0x00 0x28 0x44 0xcc
+
+# CHECK: wflld %v0, %f0
+0xe7 0x00 0x00 0x08 0x30 0xc4
+
+# CHECK: wflld %v0, %f0
+0xe7 0x00 0x00 0x08 0x30 0xc4
+
+# CHECK: wflld %v0, %f15
+0xe7 0x0f 0x00 0x08 0x30 0xc4
+
+# CHECK: wflld %v0, %v31
+0xe7 0x0f 0x00 0x08 0x34 0xc4
+
+# CHECK: wflld %v15, %f0
+0xe7 0xf0 0x00 0x08 0x30 0xc4
+
+# CHECK: wflld %v31, %f0
+0xe7 0xf0 0x00 0x08 0x38 0xc4
+
+# CHECK: wflld %v14, %v17
+0xe7 0xe1 0x00 0x08 0x34 0xc4
+
+# CHECK: wflrx %f0, %v0, 0, 0
+0xe7 0x00 0x00 0x08 0x40 0xc5
+
+# CHECK: wflrx %f0, %v0, 0, 0
+0xe7 0x00 0x00 0x08 0x40 0xc5
+
+# CHECK: wflrx %f0, %v0, 0, 15
+0xe7 0x00 0x00 0xf8 0x40 0xc5
+
+# CHECK: wflrx %f0, %v0, 4, 0
+0xe7 0x00 0x00 0x0c 0x40 0xc5
+
+# CHECK: wflrx %f0, %v0, 7, 0
+0xe7 0x00 0x00 0x0f 0x40 0xc5
+
+# CHECK: wflrx %f0, %v31, 0, 0
+0xe7 0x0f 0x00 0x08 0x44 0xc5
+
+# CHECK: wflrx %v31, %v0, 0, 0
+0xe7 0xf0 0x00 0x08 0x48 0xc5
+
+# CHECK: wflrx %f14, %v17, 4, 10
+0xe7 0xe1 0x00 0xac 0x44 0xc5
+
+# CHECK: wfmaxdb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x30 0xef
+
+# CHECK: wfmaxdb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x30 0xef
+
+# CHECK: wfmaxdb %f0, %f0, %f0, 4
+0xe7 0x00 0x00 0x48 0x30 0xef
+
+# CHECK: wfmaxdb %f0, %f0, %v31, 0
+0xe7 0x00 0xf0 0x08 0x32 0xef
+
+# CHECK: wfmaxdb %f0, %v31, %f0, 0
+0xe7 0x0f 0x00 0x08 0x34 0xef
+
+# CHECK: wfmaxdb %v31, %f0, %f0, 0
+0xe7 0xf0 0x00 0x08 0x38 0xef
+
+# CHECK: wfmaxdb %v18, %f3, %v20, 11
+0xe7 0x23 0x40 0xb8 0x3a 0xef
+
+# CHECK: wfmaxsb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x20 0xef
+
+# CHECK: wfmaxsb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x20 0xef
+
+# CHECK: wfmaxsb %f0, %f0, %f0, 4
+0xe7 0x00 0x00 0x48 0x20 0xef
+
+# CHECK: wfmaxsb %f0, %f0, %v31, 0
+0xe7 0x00 0xf0 0x08 0x22 0xef
+
+# CHECK: wfmaxsb %f0, %v31, %f0, 0
+0xe7 0x0f 0x00 0x08 0x24 0xef
+
+# CHECK: wfmaxsb %v31, %f0, %f0, 0
+0xe7 0xf0 0x00 0x08 0x28 0xef
+
+# CHECK: wfmaxsb %v18, %f3, %v20, 11
+0xe7 0x23 0x40 0xb8 0x2a 0xef
+
+# CHECK: wfmaxxb %v0, %v0, %v0, 0
+0xe7 0x00 0x00 0x08 0x40 0xef
+
+# CHECK: wfmaxxb %v0, %v0, %v0, 4
+0xe7 0x00 0x00 0x48 0x40 0xef
+
+# CHECK: wfmaxxb %v0, %v0, %v31, 0
+0xe7 0x00 0xf0 0x08 0x42 0xef
+
+# CHECK: wfmaxxb %v0, %v31, %v0, 0
+0xe7 0x0f 0x00 0x08 0x44 0xef
+
+# CHECK: wfmaxxb %v31, %v0, %v0, 0
+0xe7 0xf0 0x00 0x08 0x48 0xef
+
+# CHECK: wfmaxxb %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0xb8 0x4a 0xef
+
+# CHECK: wfmindb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x30 0xee
+
+# CHECK: wfmindb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x30 0xee
+
+# CHECK: wfmindb %f0, %f0, %f0, 4
+0xe7 0x00 0x00 0x48 0x30 0xee
+
+# CHECK: wfmindb %f0, %f0, %v31, 0
+0xe7 0x00 0xf0 0x08 0x32 0xee
+
+# CHECK: wfmindb %f0, %v31, %f0, 0
+0xe7 0x0f 0x00 0x08 0x34 0xee
+
+# CHECK: wfmindb %v31, %f0, %f0, 0
+0xe7 0xf0 0x00 0x08 0x38 0xee
+
+# CHECK: wfmindb %v18, %f3, %v20, 11
+0xe7 0x23 0x40 0xb8 0x3a 0xee
+
+# CHECK: wfminsb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x20 0xee
+
+# CHECK: wfminsb %f0, %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x20 0xee
+
+# CHECK: wfminsb %f0, %f0, %f0, 4
+0xe7 0x00 0x00 0x48 0x20 0xee
+
+# CHECK: wfminsb %f0, %f0, %v31, 0
+0xe7 0x00 0xf0 0x08 0x22 0xee
+
+# CHECK: wfminsb %f0, %v31, %f0, 0
+0xe7 0x0f 0x00 0x08 0x24 0xee
+
+# CHECK: wfminsb %v31, %f0, %f0, 0
+0xe7 0xf0 0x00 0x08 0x28 0xee
+
+# CHECK: wfminsb %v18, %f3, %v20, 11
+0xe7 0x23 0x40 0xb8 0x2a 0xee
+
+# CHECK: wfminxb %v0, %v0, %v0, 0
+0xe7 0x00 0x00 0x08 0x40 0xee
+
+# CHECK: wfminxb %v0, %v0, %v0, 4
+0xe7 0x00 0x00 0x48 0x40 0xee
+
+# CHECK: wfminxb %v0, %v0, %v31, 0
+0xe7 0x00 0xf0 0x08 0x42 0xee
+
+# CHECK: wfminxb %v0, %v31, %v0, 0
+0xe7 0x0f 0x00 0x08 0x44 0xee
+
+# CHECK: wfminxb %v31, %v0, %v0, 0
+0xe7 0xf0 0x00 0x08 0x48 0xee
+
+# CHECK: wfminxb %v18, %v3, %v20, 11
+0xe7 0x23 0x40 0xb8 0x4a 0xee
+
+# CHECK: wfmasb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x8f
+
+# CHECK: wfmasb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x8f
+
+# CHECK: wfmasb %f0, %f0, %f0, %v31
+0xe7 0x00 0x02 0x08 0xf1 0x8f
+
+# CHECK: wfmasb %f0, %f0, %v31, %f0
+0xe7 0x00 0xf2 0x08 0x02 0x8f
+
+# CHECK: wfmasb %f0, %v31, %f0, %f0
+0xe7 0x0f 0x02 0x08 0x04 0x8f
+
+# CHECK: wfmasb %v31, %f0, %f0, %f0
+0xe7 0xf0 0x02 0x08 0x08 0x8f
+
+# CHECK: wfmasb %f13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x08 0x97 0x8f
+
+# CHECK: wfmaxb %v0, %v0, %v0, %v0
+0xe7 0x00 0x04 0x08 0x00 0x8f
+
+# CHECK: wfmaxb %v0, %v0, %v0, %v31
+0xe7 0x00 0x04 0x08 0xf1 0x8f
+
+# CHECK: wfmaxb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf4 0x08 0x02 0x8f
+
+# CHECK: wfmaxb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x04 0x08 0x04 0x8f
+
+# CHECK: wfmaxb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x04 0x08 0x08 0x8f
+
+# CHECK: wfmaxb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x54 0x08 0x97 0x8f
+
+# CHECK: wfmsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe7
+
+# CHECK: wfmsb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe7
+
+# CHECK: wfmsb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xe7
+
+# CHECK: wfmsb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xe7
+
+# CHECK: wfmsb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xe7
+
+# CHECK: wfmsb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xe7
+
+# CHECK: wfmxb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xe7
+
+# CHECK: wfmxb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xe7
+
+# CHECK: wfmxb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xe7
+
+# CHECK: wfmxb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xe7
+
+# CHECK: wfmxb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xe7
+
+# CHECK: wfmssb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x8e
+
+# CHECK: wfmssb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x8e
+
+# CHECK: wfmssb %f0, %f0, %f0, %v31
+0xe7 0x00 0x02 0x08 0xf1 0x8e
+
+# CHECK: wfmssb %f0, %f0, %v31, %f0
+0xe7 0x00 0xf2 0x08 0x02 0x8e
+
+# CHECK: wfmssb %f0, %v31, %f0, %f0
+0xe7 0x0f 0x02 0x08 0x04 0x8e
+
+# CHECK: wfmssb %v31, %f0, %f0, %f0
+0xe7 0xf0 0x02 0x08 0x08 0x8e
+
+# CHECK: wfmssb %f13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x08 0x97 0x8e
+
+# CHECK: wfmsxb %v0, %v0, %v0, %v0
+0xe7 0x00 0x04 0x08 0x00 0x8e
+
+# CHECK: wfmsxb %v0, %v0, %v0, %v31
+0xe7 0x00 0x04 0x08 0xf1 0x8e
+
+# CHECK: wfmsxb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf4 0x08 0x02 0x8e
+
+# CHECK: wfmsxb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x04 0x08 0x04 0x8e
+
+# CHECK: wfmsxb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x04 0x08 0x08 0x8e
+
+# CHECK: wfmsxb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x54 0x08 0x97 0x8e
+
+# CHECK: wfnmadb %f0, %f0, %f0, %f0
+0xe7 0x00 0x03 0x08 0x00 0x9f
+
+# CHECK: wfnmadb %f0, %f0, %f0, %f0
+0xe7 0x00 0x03 0x08 0x00 0x9f
+
+# CHECK: wfnmadb %f0, %f0, %f0, %v31
+0xe7 0x00 0x03 0x08 0xf1 0x9f
+
+# CHECK: wfnmadb %f0, %f0, %v31, %f0
+0xe7 0x00 0xf3 0x08 0x02 0x9f
+
+# CHECK: wfnmadb %f0, %v31, %f0, %f0
+0xe7 0x0f 0x03 0x08 0x04 0x9f
+
+# CHECK: wfnmadb %v31, %f0, %f0, %f0
+0xe7 0xf0 0x03 0x08 0x08 0x9f
+
+# CHECK: wfnmadb %f13, %v17, %v21, %v25
+0xe7 0xd1 0x53 0x08 0x97 0x9f
+
+# CHECK: wfnmasb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x9f
+
+# CHECK: wfnmasb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x9f
+
+# CHECK: wfnmasb %f0, %f0, %f0, %v31
+0xe7 0x00 0x02 0x08 0xf1 0x9f
+
+# CHECK: wfnmasb %f0, %f0, %v31, %f0
+0xe7 0x00 0xf2 0x08 0x02 0x9f
+
+# CHECK: wfnmasb %f0, %v31, %f0, %f0
+0xe7 0x0f 0x02 0x08 0x04 0x9f
+
+# CHECK: wfnmasb %v31, %f0, %f0, %f0
+0xe7 0xf0 0x02 0x08 0x08 0x9f
+
+# CHECK: wfnmasb %f13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x08 0x97 0x9f
+
+# CHECK: wfnmaxb %v0, %v0, %v0, %v0
+0xe7 0x00 0x04 0x08 0x00 0x9f
+
+# CHECK: wfnmaxb %v0, %v0, %v0, %v31
+0xe7 0x00 0x04 0x08 0xf1 0x9f
+
+# CHECK: wfnmaxb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf4 0x08 0x02 0x9f
+
+# CHECK: wfnmaxb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x04 0x08 0x04 0x9f
+
+# CHECK: wfnmaxb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x04 0x08 0x08 0x9f
+
+# CHECK: wfnmaxb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x54 0x08 0x97 0x9f
+
+# CHECK: wfnmsdb %f0, %f0, %f0, %f0
+0xe7 0x00 0x03 0x08 0x00 0x9e
+
+# CHECK: wfnmsdb %f0, %f0, %f0, %f0
+0xe7 0x00 0x03 0x08 0x00 0x9e
+
+# CHECK: wfnmsdb %f0, %f0, %f0, %v31
+0xe7 0x00 0x03 0x08 0xf1 0x9e
+
+# CHECK: wfnmsdb %f0, %f0, %v31, %f0
+0xe7 0x00 0xf3 0x08 0x02 0x9e
+
+# CHECK: wfnmsdb %f0, %v31, %f0, %f0
+0xe7 0x0f 0x03 0x08 0x04 0x9e
+
+# CHECK: wfnmsdb %v31, %f0, %f0, %f0
+0xe7 0xf0 0x03 0x08 0x08 0x9e
+
+# CHECK: wfnmsdb %f13, %v17, %v21, %v25
+0xe7 0xd1 0x53 0x08 0x97 0x9e
+
+# CHECK: wfnmssb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x9e
+
+# CHECK: wfnmssb %f0, %f0, %f0, %f0
+0xe7 0x00 0x02 0x08 0x00 0x9e
+
+# CHECK: wfnmssb %f0, %f0, %f0, %v31
+0xe7 0x00 0x02 0x08 0xf1 0x9e
+
+# CHECK: wfnmssb %f0, %f0, %v31, %f0
+0xe7 0x00 0xf2 0x08 0x02 0x9e
+
+# CHECK: wfnmssb %f0, %v31, %f0, %f0
+0xe7 0x0f 0x02 0x08 0x04 0x9e
+
+# CHECK: wfnmssb %v31, %f0, %f0, %f0
+0xe7 0xf0 0x02 0x08 0x08 0x9e
+
+# CHECK: wfnmssb %f13, %v17, %v21, %v25
+0xe7 0xd1 0x52 0x08 0x97 0x9e
+
+# CHECK: wfnmsxb %v0, %v0, %v0, %v0
+0xe7 0x00 0x04 0x08 0x00 0x9e
+
+# CHECK: wfnmsxb %v0, %v0, %v0, %v31
+0xe7 0x00 0x04 0x08 0xf1 0x9e
+
+# CHECK: wfnmsxb %v0, %v0, %v31, %v0
+0xe7 0x00 0xf4 0x08 0x02 0x9e
+
+# CHECK: wfnmsxb %v0, %v31, %v0, %v0
+0xe7 0x0f 0x04 0x08 0x04 0x9e
+
+# CHECK: wfnmsxb %v31, %v0, %v0, %v0
+0xe7 0xf0 0x04 0x08 0x08 0x9e
+
+# CHECK: wfnmsxb %v13, %v17, %v21, %v25
+0xe7 0xd1 0x54 0x08 0x97 0x9e
+
+# CHECK: wfssb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe2
+
+# CHECK: wfssb %f0, %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xe2
+
+# CHECK: wfssb %f0, %f0, %v31
+0xe7 0x00 0xf0 0x08 0x22 0xe2
+
+# CHECK: wfssb %f0, %v31, %f0
+0xe7 0x0f 0x00 0x08 0x24 0xe2
+
+# CHECK: wfssb %v31, %f0, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xe2
+
+# CHECK: wfssb %v18, %f3, %v20
+0xe7 0x23 0x40 0x08 0x2a 0xe2
+
+# CHECK: wfsxb %v0, %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xe2
+
+# CHECK: wfsxb %v0, %v0, %v31
+0xe7 0x00 0xf0 0x08 0x42 0xe2
+
+# CHECK: wfsxb %v0, %v31, %v0
+0xe7 0x0f 0x00 0x08 0x44 0xe2
+
+# CHECK: wfsxb %v31, %v0, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xe2
+
+# CHECK: wfsxb %v18, %v3, %v20
+0xe7 0x23 0x40 0x08 0x4a 0xe2
+
+# CHECK: wfsqsb %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xce
+
+# CHECK: wfsqsb %f0, %f0
+0xe7 0x00 0x00 0x08 0x20 0xce
+
+# CHECK: wfsqsb %f0, %f15
+0xe7 0x0f 0x00 0x08 0x20 0xce
+
+# CHECK: wfsqsb %f0, %v31
+0xe7 0x0f 0x00 0x08 0x24 0xce
+
+# CHECK: wfsqsb %f15, %f0
+0xe7 0xf0 0x00 0x08 0x20 0xce
+
+# CHECK: wfsqsb %v31, %f0
+0xe7 0xf0 0x00 0x08 0x28 0xce
+
+# CHECK: wfsqsb %f14, %v17
+0xe7 0xe1 0x00 0x08 0x24 0xce
+
+# CHECK: wfsqxb %v0, %v0
+0xe7 0x00 0x00 0x08 0x40 0xce
+
+# CHECK: wfsqxb %v0, %v15
+0xe7 0x0f 0x00 0x08 0x40 0xce
+
+# CHECK: wfsqxb %v0, %v31
+0xe7 0x0f 0x00 0x08 0x44 0xce
+
+# CHECK: wfsqxb %v15, %v0
+0xe7 0xf0 0x00 0x08 0x40 0xce
+
+# CHECK: wfsqxb %v31, %v0
+0xe7 0xf0 0x00 0x08 0x48 0xce
+
+# CHECK: wfsqxb %v14, %v17
+0xe7 0xe1 0x00 0x08 0x44 0xce
+
+# CHECK: wftcisb %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x20 0x4a
+
+# CHECK: wftcisb %f0, %f0, 0
+0xe7 0x00 0x00 0x08 0x20 0x4a
+
+# CHECK: wftcisb %f0, %f0, 4095
+0xe7 0x00 0xff 0xf8 0x20 0x4a
+
+# CHECK: wftcisb %f0, %f15, 0
+0xe7 0x0f 0x00 0x08 0x20 0x4a
+
+# CHECK: wftcisb %f0, %v31, 0
+0xe7 0x0f 0x00 0x08 0x24 0x4a
+
+# CHECK: wftcisb %f15, %f0, 0
+0xe7 0xf0 0x00 0x08 0x20 0x4a
+
+# CHECK: wftcisb %v31, %f0, 0
+0xe7 0xf0 0x00 0x08 0x28 0x4a
+
+# CHECK: wftcisb %f4, %v21, 1656
+0xe7 0x45 0x67 0x88 0x24 0x4a
+
+# CHECK: wftcixb %v0, %v0, 0
+0xe7 0x00 0x00 0x08 0x40 0x4a
+
+# CHECK: wftcixb %v0, %v0, 4095
+0xe7 0x00 0xff 0xf8 0x40 0x4a
+
+# CHECK: wftcixb %v0, %v15, 0
+0xe7 0x0f 0x00 0x08 0x40 0x4a
+
+# CHECK: wftcixb %v0, %v31, 0
+0xe7 0x0f 0x00 0x08 0x44 0x4a
+
+# CHECK: wftcixb %v15, %v0, 0
+0xe7 0xf0 0x00 0x08 0x40 0x4a
+
+# CHECK: wftcixb %v31, %v0, 0
+0xe7 0xf0 0x00 0x08 0x48 0x4a
+
+# CHECK: wftcixb %v4, %v21, 1656
+0xe7 0x45 0x67 0x88 0x44 0x4a
+
diff --git a/test/MC/Mips/mt/invalid-wrong-error.s b/test/MC/Mips/mt/invalid-wrong-error.s
deleted file mode 100644
index 0247089b70ae6..0000000000000
--- a/test/MC/Mips/mt/invalid-wrong-error.s
+++ /dev/null
@@ -1,3 +0,0 @@
-# RUN: not llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt < %s 2>&1 | FileCheck %s
- mftr 0($4), $5, 0, 0, 0 # CHECK: error: unexpected token in argument list
- mttr 0($4), $5, 0, 0, 0 # CHECK: error: unexpected token in argument list
diff --git a/test/MC/Mips/mt/invalid.s b/test/MC/Mips/mt/invalid.s
index d4055c4a50f44..5a145a7e0850a 100644
--- a/test/MC/Mips/mt/invalid.s
+++ b/test/MC/Mips/mt/invalid.s
@@ -1,27 +1,13 @@
# RUN: not llvm-mc -arch=mips -mcpu=mips32 -mattr=+mt < %s 2>&1 | FileCheck %s
- dmt 4 # CHECK: error: invalid operand for instruction
- dmt $4, $5 # CHECK: error: invalid operand for instruction
- dmt $5, 0($4) # CHECK: error: invalid operand for instruction
- emt 4 # CHECK: error: invalid operand for instruction
- emt $4, $5 # CHECK: error: invalid operand for instruction
- emt $5, 0($5) # CHECK: error: invalid operand for instruction
- dvpe 4 # CHECK: error: invalid operand for instruction
- dvpe $4, $5 # CHECK: error: invalid operand for instruction
- dvpe $5, 0($4) # CHECK: error: invalid operand for instruction
- evpe 4 # CHECK: error: invalid operand for instruction
- evpe $4, $5 # CHECK: error: invalid operand for instruction
- evpe $5, 0($5) # CHECK: error: invalid operand for instruction
- mftr $4, 0($5), 0, 0, 0 # CHECK: error: invalid operand for instruction
- mftr $4, $5, 2, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
- mftr $4, $5, -1, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
- mftr $4, $5, 0, 8, 0 # CHECK: error: expected 3-bit unsigned immediate
- mftr $4, $5, 0, -1, 0 # CHECK: error: expected 3-bit unsigned immediate
- mftr $4, $4, 0, 0, 2 # CHECK: error: expected 1-bit unsigned immediate
- mftr $4, $5, 0, 0, -1 # CHECK: error: expected 1-bit unsigned immediate
- mttr $4, 0($5), 0, 0, 0 # CHECK: error: invalid operand for instruction
- mttr $4, $5, 2, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
- mttr $4, $5, -1, 0, 0 # CHECK: error: expected 1-bit unsigned immediate
- mttr $4, $5, 0, 8, 0 # CHECK: error: expected 3-bit unsigned immediate
- mttr $4, $5, 0, -1, 0 # CHECK: error: expected 3-bit unsigned immediate
- mttr $4, $4, 0, 0, 2 # CHECK: error: expected 1-bit unsigned immediate
- mttr $4, $5, 0, 0, -1 # CHECK: error: expected 1-bit unsigned immediate
+ dmt 4 # CHECK: error: invalid operand for instruction
+ dmt $4, $5 # CHECK: error: invalid operand for instruction
+ dmt $5, 0($4) # CHECK: error: invalid operand for instruction
+ emt 4 # CHECK: error: invalid operand for instruction
+ emt $4, $5 # CHECK: error: invalid operand for instruction
+ emt $5, 0($5) # CHECK: error: invalid operand for instruction
+ dvpe 4 # CHECK: error: invalid operand for instruction
+ dvpe $4, $5 # CHECK: error: invalid operand for instruction
+ dvpe $5, 0($4) # CHECK: error: invalid operand for instruction
+ evpe 4 # CHECK: error: invalid operand for instruction
+ evpe $4, $5 # CHECK: error: invalid operand for instruction
+ evpe $5, 0($5) # CHECK: error: invalid operand for instruction
diff --git a/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s b/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s
deleted file mode 100644
index 4e872412e6ef2..0000000000000
--- a/test/MC/Mips/mt/mftr-mttr-aliases-invalid-wrong-error.s
+++ /dev/null
@@ -1,18 +0,0 @@
-# RUN: not llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s \
-# RUN: 2>&1 | FileCheck %s
-
-# The integrated assembler produces a wrong or misleading error message.
-
- mftc0 0($4), $5 # CHECK: error: unexpected token in argument list
- mftc0 0($4), $5, 1 # CHECK: error: unexpected token in argument list
- mftgpr 0($4), $5 # CHECK: error: unexpected token in argument list
- mftlo 0($3) # CHECK: error: unexpected token in argument list
- mftlo 0($3), $ac1 # CHECK: error: unexpected token in argument list
- mfthi 0($3) # CHECK: error: unexpected token in argument list
- mfthi 0($3), $ac1 # CHECK: error: unexpected token in argument list
- mftacx 0($3) # CHECK: error: unexpected token in argument list
- mftacx 0($3), $ac1 # CHECK: error: unexpected token in argument list
- mftdsp 0($4) # CHECK: error: unexpected token in argument list
- mftc1 0($4), $f4 # CHECK: error: unexpected token in argument list
- mfthc1 0($4), $f4 # CHECK: error: unexpected token in argument list
- cftc1 0($4), $f8 # CHECK: error: unexpected token in argument list
diff --git a/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s b/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s
deleted file mode 100644
index 06ae8c72e654a..0000000000000
--- a/test/MC/Mips/mt/mftr-mttr-aliases-invalid.s
+++ /dev/null
@@ -1,23 +0,0 @@
-# RUN: not llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s \
-# RUN: 2>&1 | FileCheck %s
-
- mftc0 $4, 0($5) # CHECK: error: invalid operand for instruction
- mftc0 $4, 0($5), 1 # CHECK: error: invalid operand for instruction
- mftc0 $4, $5, -1 # CHECK: error: expected 3-bit unsigned immediate
- mftc0 $4, $5, 9 # CHECK: error: expected 3-bit unsigned immediate
- mftc0 $4, $5, $6 # CHECK: error: expected 3-bit unsigned immediate
- mftgpr $4, 0($5) # CHECK: error: invalid operand for instruction
- mftgpr $4, $5, $6 # CHECK: error: invalid operand for instruction
- mftlo $3, 0($ac1) # CHECK: error: invalid operand for instruction
- mftlo $4, $ac1, $4 # CHECK: error: invalid operand for instruction
- mfthi $3, 0($ac1) # CHECK: error: invalid operand for instruction
- mfthi $4, $ac1, $4 # CHECK: error: invalid operand for instruction
- mftacx $3, 0($ac1) # CHECK: error: invalid operand for instruction
- mftacx $4, $ac1, $4 # CHECK: error: invalid operand for instruction
- mftdsp $4, $5 # CHECK: error: invalid operand for instruction
- mftdsp $4, $f5 # CHECK: error: invalid operand for instruction
- mftdsp $4, $ac0 # CHECK: error: invalid operand for instruction
- mftc1 $4, 0($f4) # CHECK: error: invalid operand for instruction
- mfthc1 $4, 0($f4) # CHECK: error: invalid operand for instruction
- cftc1 $4, 0($f4) # CHECK: error: invalid operand for instruction
- cftc1 $4, $f4, $5 # CHECK: error: invalid operand for instruction
diff --git a/test/MC/Mips/mt/mftr-mttr-aliases.s b/test/MC/Mips/mt/mftr-mttr-aliases.s
deleted file mode 100644
index 92ed9f9281f20..0000000000000
--- a/test/MC/Mips/mt/mftr-mttr-aliases.s
+++ /dev/null
@@ -1,47 +0,0 @@
-# RUN: llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s | FileCheck %s
-
-# Check the various aliases of the m[ft]tr instruction.
-
- mftc0 $4, $5 # CHECK: mftr $4, $5, 0, 0, 0 # encoding: [0x41,0x05,0x20,0x00]
- mftc0 $6, $7, 1 # CHECK: mftr $6, $7, 0, 1, 0 # encoding: [0x41,0x07,0x30,0x01]
- mftgpr $5, $9 # CHECK: mftr $5, $9, 1, 0, 0 # encoding: [0x41,0x09,0x28,0x20]
- mftlo $3 # CHECK: mftr $3, $zero, 1, 1, 0 # encoding: [0x41,0x00,0x18,0x21]
- mftlo $3, $ac0 # CHECK: mftr $3, $zero, 1, 1, 0 # encoding: [0x41,0x00,0x18,0x21]
- mftlo $3, $ac1 # CHECK: mftr $3, $4, 1, 1, 0 # encoding: [0x41,0x04,0x18,0x21]
- mftlo $3, $ac2 # CHECK: mftr $3, $8, 1, 1, 0 # encoding: [0x41,0x08,0x18,0x21]
- mftlo $3, $ac3 # CHECK: mftr $3, $12, 1, 1, 0 # encoding: [0x41,0x0c,0x18,0x21]
- mfthi $3, $ac0 # CHECK: mftr $3, $1, 1, 1, 0 # encoding: [0x41,0x01,0x18,0x21]
- mfthi $3, $ac1 # CHECK: mftr $3, $5, 1, 1, 0 # encoding: [0x41,0x05,0x18,0x21]
- mfthi $3, $ac2 # CHECK: mftr $3, $9, 1, 1, 0 # encoding: [0x41,0x09,0x18,0x21]
- mfthi $3, $ac3 # CHECK: mftr $3, $13, 1, 1, 0 # encoding: [0x41,0x0d,0x18,0x21]
- mftacx $3, $ac0 # CHECK: mftr $3, $2, 1, 1, 0 # encoding: [0x41,0x02,0x18,0x21]
- mftacx $3, $ac1 # CHECK: mftr $3, $6, 1, 1, 0 # encoding: [0x41,0x06,0x18,0x21]
- mftacx $3, $ac2 # CHECK: mftr $3, $10, 1, 1, 0 # encoding: [0x41,0x0a,0x18,0x21]
- mftacx $3, $ac3 # CHECK: mftr $3, $14, 1, 1, 0 # encoding: [0x41,0x0e,0x18,0x21]
- mftdsp $4 # CHECK: mftr $4, $16, 1, 1, 0 # encoding: [0x41,0x10,0x20,0x21]
- mftc1 $4, $f5 # CHECK: mftr $4, $5, 1, 2, 0 # encoding: [0x41,0x05,0x20,0x22]
- mfthc1 $4, $f5 # CHECK: mftr $4, $5, 1, 2, 1 # encoding: [0x41,0x05,0x20,0x32]
- cftc1 $4, $f9 # CHECK: mftr $4, $9, 1, 3, 0 # encoding: [0x41,0x09,0x20,0x23]
-
- mttc0 $4, $5 # CHECK: mttr $4, $5, 0, 0, 0 # encoding: [0x41,0x84,0x28,0x00]
- mttc0 $6, $7, 1 # CHECK: mttr $6, $7, 0, 1, 0 # encoding: [0x41,0x86,0x38,0x01]
- mttgpr $5, $9 # CHECK: mttr $5, $9, 1, 0, 0 # encoding: [0x41,0x85,0x48,0x20]
- mttlo $3 # CHECK: mttr $3, $zero, 1, 1, 0 # encoding: [0x41,0x83,0x00,0x21]
- mttlo $3, $ac0 # CHECK: mttr $3, $zero, 1, 1, 0 # encoding: [0x41,0x83,0x00,0x21]
- mttlo $3, $ac1 # CHECK: mttr $3, $4, 1, 1, 0 # encoding: [0x41,0x83,0x20,0x21]
- mttlo $3, $ac2 # CHECK: mttr $3, $8, 1, 1, 0 # encoding: [0x41,0x83,0x40,0x21]
- mttlo $3, $ac3 # CHECK: mttr $3, $12, 1, 1, 0 # encoding: [0x41,0x83,0x60,0x21]
- mtthi $3 # CHECK: mttr $3, $1, 1, 1, 0 # encoding: [0x41,0x83,0x08,0x21]
- mtthi $3, $ac0 # CHECK: mttr $3, $1, 1, 1, 0 # encoding: [0x41,0x83,0x08,0x21]
- mtthi $3, $ac1 # CHECK: mttr $3, $5, 1, 1, 0 # encoding: [0x41,0x83,0x28,0x21]
- mtthi $3, $ac2 # CHECK: mttr $3, $9, 1, 1, 0 # encoding: [0x41,0x83,0x48,0x21]
- mtthi $3, $ac3 # CHECK: mttr $3, $13, 1, 1, 0 # encoding: [0x41,0x83,0x68,0x21]
- mttacx $3 # CHECK: mttr $3, $2, 1, 1, 0 # encoding: [0x41,0x83,0x10,0x21]
- mttacx $3, $ac0 # CHECK: mttr $3, $2, 1, 1, 0 # encoding: [0x41,0x83,0x10,0x21]
- mttacx $3, $ac1 # CHECK: mttr $3, $6, 1, 1, 0 # encoding: [0x41,0x83,0x30,0x21]
- mttacx $3, $ac2 # CHECK: mttr $3, $10, 1, 1, 0 # encoding: [0x41,0x83,0x50,0x21]
- mttacx $3, $ac3 # CHECK: mttr $3, $14, 1, 1, 0 # encoding: [0x41,0x83,0x70,0x21]
- mttdsp $4 # CHECK: mttr $4, $16, 1, 1, 0 # encoding: [0x41,0x84,0x80,0x21]
- mttc1 $4, $f5 # CHECK: mttr $4, $5, 1, 2, 0 # encoding: [0x41,0x84,0x28,0x22]
- mtthc1 $4, $f5 # CHECK: mttr $4, $5, 1, 2, 1 # encoding: [0x41,0x84,0x28,0x32]
- cttc1 $4, $f9 # CHECK: mttr $4, $9, 1, 3, 0 # encoding: [0x41,0x84,0x48,0x23]
diff --git a/test/MC/Mips/mt/mftr-mttr-reserved-valid.s b/test/MC/Mips/mt/mftr-mttr-reserved-valid.s
deleted file mode 100644
index c40e81bfc7d75..0000000000000
--- a/test/MC/Mips/mt/mftr-mttr-reserved-valid.s
+++ /dev/null
@@ -1,8 +0,0 @@
-# RUN: llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s | FileCheck %s
-
-# The selector value and register values here are marked as reserved in the
-# documentation, but GAS accepts them without warning.
- mftr $31, $31, 1, 1, 0 # CHECK: mftr $ra, $ra, 1, 1, 0 # encoding: [0x41,0x1f,0xf8,0x21]
- mttr $31, $31, 1, 1, 0 # CHECK: mttr $ra, $ra, 1, 1, 0 # encoding: [0x41,0x9f,0xf8,0x21]
- mftr $31, $13, 1, 6, 0 # CHECK: mftr $ra, $13, 1, 6, 0 # encoding: [0x41,0x0d,0xf8,0x26]
- mttr $31, $13, 1, 6, 0 # CHECK: mttr $ra, $13, 1, 6, 0 # encoding: [0x41,0x9f,0x68,0x26]
diff --git a/test/MC/Mips/mt/valid.s b/test/MC/Mips/mt/valid.s
index 9fa07870a61f2..ab1179d05c6a9 100644
--- a/test/MC/Mips/mt/valid.s
+++ b/test/MC/Mips/mt/valid.s
@@ -1,33 +1,13 @@
# RUN: llvm-mc -arch=mips -mcpu=mips32r2 -mattr=+mt -show-encoding < %s \
# RUN: | FileCheck %s
- dmt # CHECK: dmt # encoding: [0x41,0x60,0x0b,0xc1]
- dmt $5 # CHECK: dmt $5 # encoding: [0x41,0x65,0x0b,0xc1]
- emt # CHECK: emt # encoding: [0x41,0x60,0x0b,0xe1]
- emt $4 # CHECK: emt $4 # encoding: [0x41,0x64,0x0b,0xe1]
- dvpe # CHECK: dvpe # encoding: [0x41,0x60,0x00,0x01]
- dvpe $6 # CHECK: dvpe $6 # encoding: [0x41,0x66,0x00,0x01]
- evpe # CHECK: evpe # encoding: [0x41,0x60,0x00,0x21]
- evpe $4 # CHECK: evpe $4 # encoding: [0x41,0x64,0x00,0x21]
- fork $2, $3, $5 # CHECK: fork $2, $3, $5 # encoding: [0x7c,0x65,0x10,0x08]
- yield $4 # CHECK: yield $4 # encoding: [0x7c,0x80,0x00,0x09]
- yield $4, $5 # CHECK: yield $4, $5 # encoding: [0x7c,0xa0,0x20,0x09]
- mftr $4, $5, 0, 2, 0 # CHECK: mftr $4, $5, 0, 2, 0 # encoding: [0x41,0x05,0x20,0x02]
- mftr $4, $5, 1, 0, 0 # CHECK: mftr $4, $5, 1, 0, 0 # encoding: [0x41,0x05,0x20,0x20]
- mftr $4, $0, 1, 1, 0 # CHECK: mftr $4, $zero, 1, 1, 0 # encoding: [0x41,0x00,0x20,0x21]
- mftr $4, $10, 1, 1, 0 # CHECK: mftr $4, $10, 1, 1, 0 # encoding: [0x41,0x0a,0x20,0x21]
- mftr $4, $10, 1, 2, 0 # CHECK: mftr $4, $10, 1, 2, 0 # encoding: [0x41,0x0a,0x20,0x22]
- mftr $4, $10, 1, 2, 1 # CHECK: mftr $4, $10, 1, 2, 1 # encoding: [0x41,0x0a,0x20,0x32]
- mftr $4, $26, 1, 3, 0 # CHECK: mftr $4, $26, 1, 3, 0 # encoding: [0x41,0x1a,0x20,0x23]
- mftr $4, $31, 1, 3, 0 # CHECK: mftr $4, $ra, 1, 3, 0 # encoding: [0x41,0x1f,0x20,0x23]
- mftr $4, $14, 1, 4, 0 # CHECK: mftr $4, $14, 1, 4, 0 # encoding: [0x41,0x0e,0x20,0x24]
- mftr $4, $15, 1, 5, 0 # CHECK: mftr $4, $15, 1, 5, 0 # encoding: [0x41,0x0f,0x20,0x25]
- mttr $4, $5, 0, 2, 0 # CHECK: mttr $4, $5, 0, 2, 0 # encoding: [0x41,0x84,0x28,0x02]
- mttr $4, $5, 1, 0, 0 # CHECK: mttr $4, $5, 1, 0, 0 # encoding: [0x41,0x84,0x28,0x20]
- mttr $4, $0, 1, 1, 0 # CHECK: mttr $4, $zero, 1, 1, 0 # encoding: [0x41,0x84,0x00,0x21]
- mttr $4, $10, 1, 1, 0 # CHECK: mttr $4, $10, 1, 1, 0 # encoding: [0x41,0x84,0x50,0x21]
- mttr $4, $10, 1, 2, 0 # CHECK: mttr $4, $10, 1, 2, 0 # encoding: [0x41,0x84,0x50,0x22]
- mttr $4, $10, 1, 2, 1 # CHECK: mttr $4, $10, 1, 2, 1 # encoding: [0x41,0x84,0x50,0x32]
- mttr $4, $26, 1, 3, 0 # CHECK: mttr $4, $26, 1, 3, 0 # encoding: [0x41,0x84,0xd0,0x23]
- mttr $4, $31, 1, 3, 0 # CHECK: mttr $4, $ra, 1, 3, 0 # encoding: [0x41,0x84,0xf8,0x23]
- mttr $4, $14, 1, 4, 0 # CHECK: mttr $4, $14, 1, 4, 0 # encoding: [0x41,0x84,0x70,0x24]
- mttr $4, $15, 1, 5, 0 # CHECK: mttr $4, $15, 1, 5, 0 # encoding: [0x41,0x84,0x78,0x25]
+ dmt # CHECK: dmt # encoding: [0x41,0x60,0x0b,0xc1]
+ dmt $5 # CHECK: dmt $5 # encoding: [0x41,0x65,0x0b,0xc1]
+ emt # CHECK: emt # encoding: [0x41,0x60,0x0b,0xe1]
+ emt $4 # CHECK: emt $4 # encoding: [0x41,0x64,0x0b,0xe1]
+ dvpe # CHECK: dvpe # encoding: [0x41,0x60,0x00,0x01]
+ dvpe $6 # CHECK: dvpe $6 # encoding: [0x41,0x66,0x00,0x01]
+ evpe # CHECK: evpe # encoding: [0x41,0x60,0x00,0x21]
+ evpe $4 # CHECK: evpe $4 # encoding: [0x41,0x64,0x00,0x21]
+ fork $2, $3, $5 # CHECK: fork $2, $3, $5 # encoding: [0x7c,0x65,0x10,0x08]
+ yield $4 # CHECK: yield $4 # encoding: [0x7c,0x80,0x00,0x09]
+ yield $4, $5 # CHECK: yield $4, $5 # encoding: [0x7c,0xa0,0x20,0x09]
diff --git a/test/MC/SystemZ/insn-bad-z13.s b/test/MC/SystemZ/insn-bad-z13.s
index e9fac44aa8835..0ccdd11cbe974 100644
--- a/test/MC/SystemZ/insn-bad-z13.s
+++ b/test/MC/SystemZ/insn-bad-z13.s
@@ -4,6 +4,19 @@
# RUN: not llvm-mc -triple s390x-linux-gnu -mcpu=arch11 < %s 2> %t
# RUN: FileCheck < %t %s

+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: agh %r0, 0
+
+ agh %r0, 0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: bi 0
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: bic 0, 0
+
+ bi 0
+ bic 0, 0
+
#CHECK: error: invalid operand
#CHECK: cdpt %f0, 0(1), -1
#CHECK: error: invalid operand
@@ -150,6 +163,16 @@
cxpt %f0, 0(-), 0
cxpt %f15, 0(1), 0

+#CHECK: error: instruction requires: insert-reference-bits-multiple
+#CHECK: irbm %r0, %r0
+
+ irbm %r0, %r0
+
+#CHECK: error: instruction requires: message-security-assist-extension8
+#CHECK: kma %r2, %r4, %r6
+
+ kma %r2, %r4, %r6
+
#CHECK: error: invalid operand
#CHECK: lcbb %r0, 0, -1
#CHECK: error: invalid operand
@@ -167,6 +190,21 @@
lcbb %r0, 4096, 0
lcbb %r0, 0(%v1,%r2), 0

+#CHECK: error: instruction requires: guarded-storage
+#CHECK: lgg %r0, 0
+
+ lgg %r0, 0
+
+#CHECK: error: instruction requires: guarded-storage
+#CHECK: lgsc %r0, 0
+
+ lgsc %r0, 0
+
+#CHECK: error: instruction requires: guarded-storage
+#CHECK: llgfsg %r0, 0
+
+ llgfsg %r0, 0
+
#CHECK: error: invalid operand
#CHECK: llzrgf %r0, -524289
#CHECK: error: invalid operand
@@ -249,6 +287,41 @@
lzrg %r0, -524289
lzrg %r0, 524288

+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: mg %r0, 0
+
+ mg %r0, 0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: mgh %r0, 0
+
+ mgh %r0, 0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: mgrk %r0, %r0, %r0
+
+ mgrk %r0, %r0, %r0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: msc %r0, 0
+
+ msc %r0, 0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: msgc %r0, 0
+
+ msgc %r0, 0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: msrkc %r0, %r0, %r0
+
+ msrkc %r0, %r0, %r0
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: msgrkc %r0, %r0, %r0
+
+ msgrkc %r0, %r0, %r0
+
#CHECK: error: invalid register pair
#CHECK: ppno %r1, %r2
#CHECK: error: invalid register pair
@@ -257,6 +330,21 @@
ppno %r1, %r2
ppno %r2, %r1

+#CHECK: error: instruction requires: message-security-assist-extension7
+#CHECK: prno %r2, %r4
+
+ prno %r2, %r4
+
+#CHECK: error: instruction requires: miscellaneous-extensions-2
+#CHECK: sgh %r0, 0
+
+ sgh %r0, 0
+
+#CHECK: error: instruction requires: guarded-storage
+#CHECK: stgsc %r0, 0
+
+ stgsc %r0, 0
+
#CHECK: error: invalid operand
#CHECK: stocfh %r0, 0, -1
#CHECK: error: invalid operand
@@ -274,6 +362,16 @@
stocfh %r0, 524288, 1
stocfh %r0, 0(%r1,%r2), 1

+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vap %v0, %v0, %v0, 0, 0
+
+ vap %v0, %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vbperm %v0, %v0, %v0
+
+ vbperm %v0, %v0, %v0
+
#CHECK: error: invalid operand
#CHECK: vcdg %v0, %v0, 0, 0, -1
#CHECK: error: invalid operand
@@ -410,6 +508,36 @@
vclgdb %v0, %v0, -1, 0
vclgdb %v0, %v0, 16, 0

+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vcp %v0, %v0, 0
+
+ vcp %v0, %v0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vcvb %r0, %v0, 0
+
+ vcvb %r0, %v0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vcvbg %r0, %v0, 0
+
+ vcvbg %r0, %v0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vcvd %v0, %r0, 0, 0
+
+ vcvd %v0, %r0, 0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vcvdg %v0, %r0, 0, 0
+
+ vcvdg %v0, %r0, 0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vdp %v0, %v0, %v0, 0, 0
+
+ vdp %v0, %v0, %v0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: verim %v0, %v0, %v0, 0, -1
#CHECK: error: invalid operand
@@ -828,6 +955,40 @@
vfaezhs %v0, %v0
vfaezhs %v0, %v0, %v0, 0, 0

+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfasb %v0, %v0, %v0
+
+ vfasb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfcesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfcesbs %v0, %v0, %v0
+
+ vfcesb %v0, %v0, %v0
+ vfcesbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfchsb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfchsbs %v0, %v0, %v0
+
+ vfchsb %v0, %v0, %v0
+ vfchsbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfchesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfchesbs %v0, %v0, %v0
+
+ vfchesb %v0, %v0, %v0
+ vfchesbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfdsb %v0, %v0, %v0
+
+ vfdsb %v0, %v0, %v0
+
#CHECK: error: invalid operand
#CHECK: vfee %v0, %v0, %v0, 0, -1
#CHECK: error: invalid operand
@@ -1130,6 +1291,152 @@
vfidb %v0, %v0, -1, 0
vfidb %v0, %v0, 16, 0

+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfisb %v0, %v0, 0, 0
+
+ vfisb %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkedb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkedbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkesbs %v0, %v0, %v0
+
+ vfkedb %v0, %v0, %v0
+ vfkedbs %v0, %v0, %v0
+ vfkesb %v0, %v0, %v0
+ vfkesbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhdb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhdbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhsb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhsbs %v0, %v0, %v0
+
+ vfkhdb %v0, %v0, %v0
+ vfkhdbs %v0, %v0, %v0
+ vfkhsb %v0, %v0, %v0
+ vfkhsbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhedb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhedbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfkhesbs %v0, %v0, %v0
+
+ vfkhedb %v0, %v0, %v0
+ vfkhedbs %v0, %v0, %v0
+ vfkhesb %v0, %v0, %v0
+ vfkhesbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfpsosb %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vflcsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vflnsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vflpsb %v0, %v0
+
+ vfpsosb %v0, %v0, 0
+ vflcsb %v0, %v0
+ vflnsb %v0, %v0
+ vflpsb %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfll %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vflls %v0, %v0
+
+ vfll %v0, %v0, 0, 0
+ vflls %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vflr %v0, %v0, 0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vflrd %v0, %v0, 0, 0
+
+ vflr %v0, %v0, 0, 0, 0
+ vflrd %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmax %v0, %v0, %v0, 0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmaxdb %v0, %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmaxsb %v0, %v0, %v0, 0
+
+ vfmax %v0, %v0, %v0, 0, 0, 0
+ vfmaxdb %v0, %v0, %v0, 0
+ vfmaxsb %v0, %v0, %v0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmin %v0, %v0, %v0, 0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmindb %v0, %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfminsb %v0, %v0, %v0, 0
+
+ vfmin %v0, %v0, %v0, 0, 0, 0
+ vfmindb %v0, %v0, %v0, 0
+ vfminsb %v0, %v0, %v0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmasb %v0, %v0, %v0, %v0
+
+ vfmasb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmsb %v0, %v0, %v0
+
+ vfmsb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfmssb %v0, %v0, %v0, %v0
+
+ vfmssb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfnma %v0, %v0, %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfnmadb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfnmasb %v0, %v0, %v0, %v0
+
+ vfnma %v0, %v0, %v0, %v0, 0, 0
+ vfnmadb %v0, %v0, %v0, %v0
+ vfnmasb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfnms %v0, %v0, %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfnmsdb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfnmssb %v0, %v0, %v0, %v0
+
+ vfnms %v0, %v0, %v0, %v0, 0, 0
+ vfnmsdb %v0, %v0, %v0, %v0
+ vfnmssb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfssb %v0, %v0, %v0
+
+ vfssb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vfsqsb %v0, %v0
+
+ vfsqsb %v0, %v0
+
#CHECK: error: invalid operand
#CHECK: vftci %v0, %v0, 0, 0, -1
#CHECK: error: invalid operand
@@ -1158,6 +1465,11 @@
vftcidb %v0, %v0, -1
vftcidb %v0, %v0, 4096
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vftcisb %v0, %v0, 0
+
+ vftcisb %v0, %v0, 0
+
#CHECK: error: invalid operand
#CHECK: vgbm %v0, -1
#CHECK: error: invalid operand
@@ -1615,6 +1927,11 @@
vlgvh %r0, %v0, 4096
vlgvh %r0, %v0, 0(%r0)
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vlip %v0, 0, 0
+
+ vlip %v0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: vll %v0, %r0, -1
#CHECK: error: invalid operand
@@ -1687,6 +2004,11 @@
vllezh %v0, 4096
vllezh %v0, 0(%v1,%r2)
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vllezlf %v0, 0
+
+ vllezlf %v0, 0
+
#CHECK: error: invalid operand
#CHECK: vlm %v0, %v0, -1
#CHECK: error: invalid operand
@@ -1756,6 +2078,16 @@
vlreph %v0, 4096
vlreph %v0, 0(%v1,%r2)
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vlrl %v0, 0, 0
+
+ vlrl %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vlrlr %v0, %r0, 0
+
+ vlrlr %v0, %r0, 0
+
#CHECK: error: invalid operand
#CHECK: vlvg %v0, %r0, 0, -1
#CHECK: error: invalid operand
@@ -1817,6 +2149,39 @@
vlvgh %v0, %r0, 4096
vlvgh %v0, %r0, 0(%r0)
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vmp %v0, %v0, %v0, 0, 0
+
+ vmp %v0, %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vmsl %v0, %v0, %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vmslg %v0, %v0, %v0, %v0, 0
+
+ vmsl %v0, %v0, %v0, %v0, 0, 0
+ vmslg %v0, %v0, %v0, %v0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vmsp %v0, %v0, %v0, 0, 0
+
+ vmsp %v0, %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vnn %v0, %v0, %v0
+
+ vnn %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vnx %v0, %v0, %v0
+
+ vnx %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: voc %v0, %v0, %v0
+
+ voc %v0, %v0, %v0
+
#CHECK: error: invalid operand
#CHECK: vpdi %v0, %v0, %v0, -1
#CHECK: error: invalid operand
@@ -1825,6 +2190,30 @@
vpdi %v0, %v0, %v0, -1
vpdi %v0, %v0, %v0, 16
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vpkz %v0, 0, 0
+
+ vpkz %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vpopctb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vpopctf %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vpopctg %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: vpopcth %v0, %v0
+
+ vpopctb %v0, %v0
+ vpopctf %v0, %v0
+ vpopctg %v0, %v0
+ vpopcth %v0, %v0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vpsop %v0, %v0, 0, 0, 0
+
+ vpsop %v0, %v0, 0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: vrep %v0, %v0, 0, -1
#CHECK: error: invalid operand
@@ -1917,6 +2306,11 @@
vrepih %v0, -32769
vrepih %v0, 32768
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vrp %v0, %v0, %v0, 0, 0
+
+ vrp %v0, %v0, %v0, 0, 0
+
#CHECK: error: vector index required
#CHECK: vscef %v0, 0(%r1), 0
#CHECK: error: vector index required
@@ -1957,6 +2351,11 @@
vsceg %v0, -1(%v0,%r1), 0
vsceg %v0, 4096(%v0,%r1), 0
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vsdp %v0, %v0, %v0, 0, 0
+
+ vsdp %v0, %v0, %v0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: vsldb %v0, %v0, %v0, -1
#CHECK: error: invalid operand
@@ -1965,6 +2364,16 @@
vsldb %v0, %v0, %v0, -1
vsldb %v0, %v0, %v0, 256
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vsp %v0, %v0, %v0, 0, 0
+
+ vsp %v0, %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vsrp %v0, %v0, 0, 0, 0
+
+ vsrp %v0, %v0, 0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: vst %v0, -1
#CHECK: error: invalid operand
@@ -2251,6 +2660,26 @@
vstrczhs %v0, %v0, %v0
vstrczhs %v0, %v0, %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vstrl %v0, 0, 0
+
+ vstrl %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vstrlr %v0, %r0, 0
+
+ vstrlr %v0, %r0, 0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vtp %v0
+
+ vtp %v0
+
+#CHECK: error: instruction requires: vector-packed-decimal
+#CHECK: vupkz %v0, 0, 0
+
+ vupkz %v0, 0, 0
+
#CHECK: error: invalid operand
#CHECK: wcdgb %v0, %v0, 0, -1
#CHECK: error: invalid operand
@@ -2307,6 +2736,72 @@
wclgdb %v0, %v0, -1, 0
wclgdb %v0, %v0, 16, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfasb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfaxb %v0, %v0, %v0
+
+ wfasb %v0, %v0, %v0
+ wfaxb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfcsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfcxb %v0, %v0
+
+ wfcsb %v0, %v0
+ wfcxb %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfcesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfcesbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfcexb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfcexbs %v0, %v0, %v0
+
+ wfcesb %v0, %v0, %v0
+ wfcesbs %v0, %v0, %v0
+ wfcexb %v0, %v0, %v0
+ wfcexbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchsb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchsbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchxb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchxbs %v0, %v0, %v0
+
+ wfchsb %v0, %v0, %v0
+ wfchsbs %v0, %v0, %v0
+ wfchxb %v0, %v0, %v0
+ wfchxbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchesbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchexb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfchexbs %v0, %v0, %v0
+
+ wfchesb %v0, %v0, %v0
+ wfchesbs %v0, %v0, %v0
+ wfchexb %v0, %v0, %v0
+ wfchexbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfdsb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfdxb %v0, %v0, %v0
+
+ wfdsb %v0, %v0, %v0
+ wfdxb %v0, %v0, %v0
+
#CHECK: error: invalid operand
#CHECK: wfidb %v0, %v0, 0, -1
#CHECK: error: invalid operand
@@ -2321,6 +2816,208 @@
wfidb %v0, %v0, -1, 0
wfidb %v0, %v0, 16, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfisb %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfixb %v0, %v0, 0, 0
+
+ wfisb %v0, %v0, 0, 0
+ wfixb %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfksb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkxb %v0, %v0
+
+ wfksb %v0, %v0
+ wfkxb %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkedb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkedbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkesbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkexb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkexbs %v0, %v0, %v0
+
+ wfkedb %v0, %v0, %v0
+ wfkedbs %v0, %v0, %v0
+ wfkesb %v0, %v0, %v0
+ wfkesbs %v0, %v0, %v0
+ wfkexb %v0, %v0, %v0
+ wfkexbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhdb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhdbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhsb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhsbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhxb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhxbs %v0, %v0, %v0
+
+ wfkhdb %v0, %v0, %v0
+ wfkhdbs %v0, %v0, %v0
+ wfkhsb %v0, %v0, %v0
+ wfkhsbs %v0, %v0, %v0
+ wfkhxb %v0, %v0, %v0
+ wfkhxbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhedb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhedbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhesb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhesbs %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhexb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfkhexbs %v0, %v0, %v0
+
+ wfkhedb %v0, %v0, %v0
+ wfkhedbs %v0, %v0, %v0
+ wfkhesb %v0, %v0, %v0
+ wfkhesbs %v0, %v0, %v0
+ wfkhexb %v0, %v0, %v0
+ wfkhexbs %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfpsosb %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfpsoxb %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflcsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflcxb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflnsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflnxb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflpsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflpxb %v0, %v0
+
+ wfpsosb %v0, %v0, 0
+ wfpsoxb %v0, %v0, 0
+ wflcsb %v0, %v0
+ wflcxb %v0, %v0
+ wflnsb %v0, %v0
+ wflnxb %v0, %v0
+ wflpsb %v0, %v0
+ wflpxb %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflls %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflld %v0, %v0
+
+ wflls %v0, %v0
+ wflld %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflrd %v0, %v0, 0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wflrx %v0, %v0, 0, 0
+
+ wflrd %v0, %v0, 0, 0
+ wflrx %v0, %v0, 0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmaxdb %v0, %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmaxsb %v0, %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmaxxb %v0, %v0, %v0, 0
+
+ wfmaxdb %v0, %v0, %v0, 0
+ wfmaxsb %v0, %v0, %v0, 0
+ wfmaxxb %v0, %v0, %v0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmindb %v0, %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfminsb %v0, %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfminxb %v0, %v0, %v0, 0
+
+ wfmindb %v0, %v0, %v0, 0
+ wfminsb %v0, %v0, %v0, 0
+ wfminxb %v0, %v0, %v0, 0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmasb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmaxb %v0, %v0, %v0, %v0
+
+ wfmasb %v0, %v0, %v0, %v0
+ wfmaxb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmsb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmxb %v0, %v0, %v0
+
+ wfmsb %v0, %v0, %v0
+ wfmxb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmssb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfmsxb %v0, %v0, %v0, %v0
+
+ wfmssb %v0, %v0, %v0, %v0
+ wfmsxb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfnmadb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfnmasb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfnmaxb %v0, %v0, %v0, %v0
+
+ wfnmadb %v0, %v0, %v0, %v0
+ wfnmasb %v0, %v0, %v0, %v0
+ wfnmaxb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfnmsdb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfnmssb %v0, %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfnmsxb %v0, %v0, %v0, %v0
+
+ wfnmsdb %v0, %v0, %v0, %v0
+ wfnmssb %v0, %v0, %v0, %v0
+ wfnmsxb %v0, %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfssb %v0, %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfsxb %v0, %v0, %v0
+
+ wfssb %v0, %v0, %v0
+ wfsxb %v0, %v0, %v0
+
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfsqsb %v0, %v0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wfsqxb %v0, %v0
+
+ wfsqsb %v0, %v0
+ wfsqxb %v0, %v0
+
#CHECK: error: invalid operand
#CHECK: wftcidb %v0, %v0, -1
#CHECK: error: invalid operand
@@ -2329,6 +3026,14 @@
wftcidb %v0, %v0, -1
wftcidb %v0, %v0, 4096
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wftcisb %v0, %v0, 0
+#CHECK: error: instruction requires: vector-enhancements-1
+#CHECK: wftcixb %v0, %v0, 0
+
+ wftcisb %v0, %v0, 0
+ wftcixb %v0, %v0, 0
+
#CHECK: error: invalid operand
#CHECK: wledb %v0, %v0, 0, -1
#CHECK: error: invalid operand
diff --git a/test/MC/SystemZ/insn-bad-z14.s b/test/MC/SystemZ/insn-bad-z14.s
new file mode 100644
index 0000000000000..8bc736a7a1a4c
--- /dev/null
+++ b/test/MC/SystemZ/insn-bad-z14.s
@@ -0,0 +1,752 @@
+# For z14 only.
+# RUN: not llvm-mc -triple s390x-linux-gnu -mcpu=z14 < %s 2> %t
+# RUN: FileCheck < %t %s
+# RUN: not llvm-mc -triple s390x-linux-gnu -mcpu=arch12 < %s 2> %t
+# RUN: FileCheck < %t %s
+
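+# Each case below appears twice: first as the "#CHECK:" pattern that FileCheck
+# must find in llvm-mc's stderr (captured in %t by the RUN lines above), then
+# as the statement actually fed to the assembler.  Operand-range cases probe
+# one value past each bound; e.g. bi takes a signed 20-bit displacement
+# (-524288..524287), so -524289 and 524288 must both be rejected.
+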
+#CHECK: error: invalid operand
+#CHECK: bi -524289
+#CHECK: error: invalid operand
+#CHECK: bi 524288
+
+ bi -524289
+ bi 524288
+
+#CHECK: error: invalid operand
+#CHECK: bic -1, 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: bic 16, 0(%r1)
+#CHECK: error: invalid operand
+#CHECK: bic 0, -524289
+#CHECK: error: invalid operand
+#CHECK: bic 0, 524288
+
+ bic -1, 0(%r1)
+ bic 16, 0(%r1)
+ bic 0, -524289
+ bic 0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: agh %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: agh %r0, 524288
+
+ agh %r0, -524289
+ agh %r0, 524288
+
+#CHECK: error: invalid register pair
+#CHECK: kma %r1, %r2, %r4
+#CHECK: error: invalid register pair
+#CHECK: kma %r2, %r1, %r4
+#CHECK: error: invalid register pair
+#CHECK: kma %r2, %r4, %r1
+
+ kma %r1, %r2, %r4
+ kma %r2, %r1, %r4
+ kma %r2, %r4, %r1
+
+#CHECK: error: invalid operand
+#CHECK: lgg %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lgg %r0, 524288
+
+ lgg %r0, -524289
+ lgg %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: lgsc %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lgsc %r0, 524288
+
+ lgsc %r0, -524289
+ lgsc %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: llgfsg %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: llgfsg %r0, 524288
+
+ llgfsg %r0, -524289
+ llgfsg %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: mg %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: mg %r0, 524288
+#CHECK: error: invalid register pair
+#CHECK: mg %r1, 0
+
+ mg %r0, -524289
+ mg %r0, 524288
+ mg %r1, 0
+
+#CHECK: error: invalid operand
+#CHECK: mgh %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: mgh %r0, 524288
+
+ mgh %r0, -524289
+ mgh %r0, 524288
+
+#CHECK: error: invalid register pair
+#CHECK: mgrk %r1, %r0, %r0
+
+ mgrk %r1, %r0, %r0
+
+#CHECK: error: invalid operand
+#CHECK: msc %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: msc %r0, 524288
+
+ msc %r0, -524289
+ msc %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: msgc %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: msgc %r0, 524288
+
+ msgc %r0, -524289
+ msgc %r0, 524288
+
+#CHECK: error: invalid register pair
+#CHECK: prno %r1, %r2
+#CHECK: error: invalid register pair
+#CHECK: prno %r2, %r1
+
+ prno %r1, %r2
+ prno %r2, %r1
+
+#CHECK: error: invalid operand
+#CHECK: sgh %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: sgh %r0, 524288
+
+ sgh %r0, -524289
+ sgh %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: stgsc %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: stgsc %r0, 524288
+
+ stgsc %r0, -524289
+ stgsc %r0, 524288
+
+#CHECK: error: invalid operand
+#CHECK: vap %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vap %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vap %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vap %v0, %v0, %v0, 256, 0
+
+ vap %v0, %v0, %v0, 0, -1
+ vap %v0, %v0, %v0, 0, 16
+ vap %v0, %v0, %v0, -1, 0
+ vap %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vcp %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vcp %v0, %v0, 16
+
+ vcp %v0, %v0, -1
+ vcp %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vcvb %r0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vcvb %r0, %v0, 16
+
+ vcvb %r0, %v0, -1
+ vcvb %r0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vcvbg %r0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vcvbg %r0, %v0, 16
+
+ vcvbg %r0, %v0, -1
+ vcvbg %r0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vcvd %r0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vcvd %r0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vcvd %r0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vcvd %r0, %v0, 256, 0
+
+ vcvd %r0, %v0, 0, -1
+ vcvd %r0, %v0, 0, 16
+ vcvd %r0, %v0, -1, 0
+ vcvd %r0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vcvdg %r0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vcvdg %r0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vcvdg %r0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vcvdg %r0, %v0, 256, 0
+
+ vcvdg %r0, %v0, 0, -1
+ vcvdg %r0, %v0, 0, 16
+ vcvdg %r0, %v0, -1, 0
+ vcvdg %r0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vdp %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vdp %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vdp %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vdp %v0, %v0, %v0, 256, 0
+
+ vdp %v0, %v0, %v0, 0, -1
+ vdp %v0, %v0, %v0, 0, 16
+ vdp %v0, %v0, %v0, -1, 0
+ vdp %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfisb %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfisb %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfisb %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfisb %v0, %v0, 16, 0
+
+ vfisb %v0, %v0, 0, -1
+ vfisb %v0, %v0, 0, 16
+ vfisb %v0, %v0, -1, 0
+ vfisb %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfll %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfll %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfll %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfll %v0, %v0, 16, 0
+
+ vfll %v0, %v0, 0, -1
+ vfll %v0, %v0, 0, 16
+ vfll %v0, %v0, -1, 0
+ vfll %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: vflr %v0, %v0, 0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vflr %v0, %v0, 0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vflr %v0, %v0, 0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vflr %v0, %v0, 0, 16, 0
+#CHECK: error: invalid operand
+#CHECK: vflr %v0, %v0, -1, 0, 0
+#CHECK: error: invalid operand
+#CHECK: vflr %v0, %v0, 16, 0, 0
+
+ vflr %v0, %v0, 0, 0, -1
+ vflr %v0, %v0, 0, 0, 16
+ vflr %v0, %v0, 0, -1, 0
+ vflr %v0, %v0, 0, 16, 0
+ vflr %v0, %v0, -1, 0, 0
+ vflr %v0, %v0, 16, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vflrd %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vflrd %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vflrd %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vflrd %v0, %v0, 16, 0
+
+ vflrd %v0, %v0, 0, -1
+ vflrd %v0, %v0, 0, 16
+ vflrd %v0, %v0, -1, 0
+ vflrd %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfmax %v0, %v0, %v0, 0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfmax %v0, %v0, %v0, 0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfmax %v0, %v0, %v0, 0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfmax %v0, %v0, %v0, 0, 16, 0
+#CHECK: error: invalid operand
+#CHECK: vfmax %v0, %v0, %v0, -1, 0, 0
+#CHECK: error: invalid operand
+#CHECK: vfmax %v0, %v0, %v0, 16, 0, 0
+
+ vfmax %v0, %v0, %v0, 0, 0, -1
+ vfmax %v0, %v0, %v0, 0, 0, 16
+ vfmax %v0, %v0, %v0, 0, -1, 0
+ vfmax %v0, %v0, %v0, 0, 16, 0
+ vfmax %v0, %v0, %v0, -1, 0, 0
+ vfmax %v0, %v0, %v0, 16, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfmaxdb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfmaxdb %v0, %v0, %v0, 16
+
+ vfmaxdb %v0, %v0, %v0, -1
+ vfmaxdb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vfmaxsb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfmaxsb %v0, %v0, %v0, 16
+
+ vfmaxsb %v0, %v0, %v0, -1
+ vfmaxsb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vfmin %v0, %v0, %v0, 0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfmin %v0, %v0, %v0, 0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfmin %v0, %v0, %v0, 0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfmin %v0, %v0, %v0, 0, 16, 0
+#CHECK: error: invalid operand
+#CHECK: vfmin %v0, %v0, %v0, -1, 0, 0
+#CHECK: error: invalid operand
+#CHECK: vfmin %v0, %v0, %v0, 16, 0, 0
+
+ vfmin %v0, %v0, %v0, 0, 0, -1
+ vfmin %v0, %v0, %v0, 0, 0, 16
+ vfmin %v0, %v0, %v0, 0, -1, 0
+ vfmin %v0, %v0, %v0, 0, 16, 0
+ vfmin %v0, %v0, %v0, -1, 0, 0
+ vfmin %v0, %v0, %v0, 16, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfmindb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfmindb %v0, %v0, %v0, 16
+
+ vfmindb %v0, %v0, %v0, -1
+ vfmindb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vfminsb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vfminsb %v0, %v0, %v0, 16
+
+ vfminsb %v0, %v0, %v0, -1
+ vfminsb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vfnma %v0, %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfnma %v0, %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfnma %v0, %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfnma %v0, %v0, %v0, %v0, 16, 0
+
+ vfnma %v0, %v0, %v0, %v0, 0, -1
+ vfnma %v0, %v0, %v0, %v0, 0, 16
+ vfnma %v0, %v0, %v0, %v0, -1, 0
+ vfnma %v0, %v0, %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: vfnms %v0, %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vfnms %v0, %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vfnms %v0, %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vfnms %v0, %v0, %v0, %v0, 16, 0
+
+ vfnms %v0, %v0, %v0, %v0, 0, -1
+ vfnms %v0, %v0, %v0, %v0, 0, 16
+ vfnms %v0, %v0, %v0, %v0, -1, 0
+ vfnms %v0, %v0, %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: vftcisb %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vftcisb %v0, %v0, 4096
+
+ vftcisb %v0, %v0, -1
+ vftcisb %v0, %v0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: vlip %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vlip %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vlip %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vlip %v0, 65536, 0
+
+ vlip %v0, 0, -1
+ vlip %v0, 0, 16
+ vlip %v0, -1, 0
+ vlip %v0, 65536, 0
+
+#CHECK: error: invalid operand
+#CHECK: vllezlf %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vllezlf %v0, 4096
+#CHECK: error: invalid use of vector addressing
+#CHECK: vllezlf %v0, 0(%v1,%r2)
+
+ vllezlf %v0, -1
+ vllezlf %v0, 4096
+ vllezlf %v0, 0(%v1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: vlrl %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vlrl %v0, 0, 256
+#CHECK: error: invalid operand
+#CHECK: vlrl %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vlrl %v0, 4096, 0
+#CHECK: error: %r0 used in an address
+#CHECK: vlrl %v0, 0(%r0), 0
+
+ vlrl %v0, 0, -1
+ vlrl %v0, 0, 256
+ vlrl %v0, -1, 0
+ vlrl %v0, 4096, 0
+ vlrl %v0, 0(%r0), 0
+
+#CHECK: error: invalid operand
+#CHECK: vlrlr %v0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: vlrlr %v0, %r0, 4096
+#CHECK: error: %r0 used in an address
+#CHECK: vlrlr %v0, %r0, 0(%r0)
+
+ vlrlr %v0, %r0, -1
+ vlrlr %v0, %r0, 4096
+ vlrlr %v0, %r0, 0(%r0)
+
+#CHECK: error: invalid operand
+#CHECK: vmp %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vmp %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vmp %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vmp %v0, %v0, %v0, 256, 0
+
+ vmp %v0, %v0, %v0, 0, -1
+ vmp %v0, %v0, %v0, 0, 16
+ vmp %v0, %v0, %v0, -1, 0
+ vmp %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vmsp %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vmsp %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vmsp %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vmsp %v0, %v0, %v0, 256, 0
+
+ vmsp %v0, %v0, %v0, 0, -1
+ vmsp %v0, %v0, %v0, 0, 16
+ vmsp %v0, %v0, %v0, -1, 0
+ vmsp %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vmsl %v0, %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vmsl %v0, %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vmsl %v0, %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vmsl %v0, %v0, %v0, %v0, 16, 0
+
+ vmsl %v0, %v0, %v0, %v0, 0, -1
+ vmsl %v0, %v0, %v0, %v0, 0, 16
+ vmsl %v0, %v0, %v0, %v0, -1, 0
+ vmsl %v0, %v0, %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: vmslg %v0, %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: vmslg %v0, %v0, %v0, %v0, 16
+
+ vmslg %v0, %v0, %v0, %v0, -1
+ vmslg %v0, %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: vpkz %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vpkz %v0, 0, 256
+#CHECK: error: invalid operand
+#CHECK: vpkz %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vpkz %v0, 4096, 0
+#CHECK: error: %r0 used in an address
+#CHECK: vpkz %v0, 0(%r0), 0
+
+ vpkz %v0, 0, -1
+ vpkz %v0, 0, 256
+ vpkz %v0, -1, 0
+ vpkz %v0, 4096, 0
+ vpkz %v0, 0(%r0), 0
+
+#CHECK: error: invalid operand
+#CHECK: vpsop %v0, %v0, 0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vpsop %v0, %v0, 0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vpsop %v0, %v0, 0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vpsop %v0, %v0, 0, 256, 0
+#CHECK: error: invalid operand
+#CHECK: vpsop %v0, %v0, -1, 0, 0
+#CHECK: error: invalid operand
+#CHECK: vpsop %v0, %v0, 256, 0, 0
+
+ vpsop %v0, %v0, 0, 0, -1
+ vpsop %v0, %v0, 0, 0, 16
+ vpsop %v0, %v0, 0, -1, 0
+ vpsop %v0, %v0, 0, 256, 0
+ vpsop %v0, %v0, -1, 0, 0
+ vpsop %v0, %v0, 256, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vrp %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vrp %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vrp %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vrp %v0, %v0, %v0, 256, 0
+
+ vrp %v0, %v0, %v0, 0, -1
+ vrp %v0, %v0, %v0, 0, 16
+ vrp %v0, %v0, %v0, -1, 0
+ vrp %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vsdp %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vsdp %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vsdp %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vsdp %v0, %v0, %v0, 256, 0
+
+ vsdp %v0, %v0, %v0, 0, -1
+ vsdp %v0, %v0, %v0, 0, 16
+ vsdp %v0, %v0, %v0, -1, 0
+ vsdp %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vsp %v0, %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vsp %v0, %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vsp %v0, %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vsp %v0, %v0, %v0, 256, 0
+
+ vsp %v0, %v0, %v0, 0, -1
+ vsp %v0, %v0, %v0, 0, 16
+ vsp %v0, %v0, %v0, -1, 0
+ vsp %v0, %v0, %v0, 256, 0
+
+#CHECK: error: invalid operand
+#CHECK: vsrp %v0, %v0, 0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vsrp %v0, %v0, 0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: vsrp %v0, %v0, 0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vsrp %v0, %v0, 0, 256, 0
+#CHECK: error: invalid operand
+#CHECK: vsrp %v0, %v0, -1, 0, 0
+#CHECK: error: invalid operand
+#CHECK: vsrp %v0, %v0, 256, 0, 0
+
+ vsrp %v0, %v0, 0, 0, -1
+ vsrp %v0, %v0, 0, 0, 16
+ vsrp %v0, %v0, 0, -1, 0
+ vsrp %v0, %v0, 0, 256, 0
+ vsrp %v0, %v0, -1, 0, 0
+ vsrp %v0, %v0, 256, 0, 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrl %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrl %v0, 0, 256
+#CHECK: error: invalid operand
+#CHECK: vstrl %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vstrl %v0, 4096, 0
+#CHECK: error: %r0 used in an address
+#CHECK: vstrl %v0, 0(%r0), 0
+
+ vstrl %v0, 0, -1
+ vstrl %v0, 0, 256
+ vstrl %v0, -1, 0
+ vstrl %v0, 4096, 0
+ vstrl %v0, 0(%r0), 0
+
+#CHECK: error: invalid operand
+#CHECK: vstrlr %v0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: vstrlr %v0, %r0, 4096
+#CHECK: error: %r0 used in an address
+#CHECK: vstrlr %v0, %r0, 0(%r0)
+
+ vstrlr %v0, %r0, -1
+ vstrlr %v0, %r0, 4096
+ vstrlr %v0, %r0, 0(%r0)
+
+#CHECK: error: invalid operand
+#CHECK: vupkz %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vupkz %v0, 0, 256
+#CHECK: error: invalid operand
+#CHECK: vupkz %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: vupkz %v0, 4096, 0
+#CHECK: error: %r0 used in an address
+#CHECK: vupkz %v0, 0(%r0), 0
+
+ vupkz %v0, 0, -1
+ vupkz %v0, 0, 256
+ vupkz %v0, -1, 0
+ vupkz %v0, 4096, 0
+ vupkz %v0, 0(%r0), 0
+
+#CHECK: error: invalid operand
+#CHECK: wfisb %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: wfisb %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: wfisb %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: wfisb %v0, %v0, 16, 0
+
+ wfisb %v0, %v0, 0, -1
+ wfisb %v0, %v0, 0, 16
+ wfisb %v0, %v0, -1, 0
+ wfisb %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: wfixb %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: wfixb %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: wfixb %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: wfixb %v0, %v0, 16, 0
+
+ wfixb %v0, %v0, 0, -1
+ wfixb %v0, %v0, 0, 16
+ wfixb %v0, %v0, -1, 0
+ wfixb %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: wflrd %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: wflrd %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: wflrd %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: wflrd %v0, %v0, 16, 0
+
+ wflrd %v0, %v0, 0, -1
+ wflrd %v0, %v0, 0, 16
+ wflrd %v0, %v0, -1, 0
+ wflrd %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: wflrx %v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: wflrx %v0, %v0, 0, 16
+#CHECK: error: invalid operand
+#CHECK: wflrx %v0, %v0, -1, 0
+#CHECK: error: invalid operand
+#CHECK: wflrx %v0, %v0, 16, 0
+
+ wflrx %v0, %v0, 0, -1
+ wflrx %v0, %v0, 0, 16
+ wflrx %v0, %v0, -1, 0
+ wflrx %v0, %v0, 16, 0
+
+#CHECK: error: invalid operand
+#CHECK: wfmaxdb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wfmaxdb %v0, %v0, %v0, 16
+
+ wfmaxdb %v0, %v0, %v0, -1
+ wfmaxdb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: wfmaxsb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wfmaxsb %v0, %v0, %v0, 16
+
+ wfmaxsb %v0, %v0, %v0, -1
+ wfmaxsb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: wfmaxxb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wfmaxxb %v0, %v0, %v0, 16
+
+ wfmaxxb %v0, %v0, %v0, -1
+ wfmaxxb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: wfmindb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wfmindb %v0, %v0, %v0, 16
+
+ wfmindb %v0, %v0, %v0, -1
+ wfmindb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: wfminsb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wfminsb %v0, %v0, %v0, 16
+
+ wfminsb %v0, %v0, %v0, -1
+ wfminsb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: wfminxb %v0, %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wfminxb %v0, %v0, %v0, 16
+
+ wfminxb %v0, %v0, %v0, -1
+ wfminxb %v0, %v0, %v0, 16
+
+#CHECK: error: invalid operand
+#CHECK: wftcisb %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wftcisb %v0, %v0, 4096
+
+ wftcisb %v0, %v0, -1
+ wftcisb %v0, %v0, 4096
+
+#CHECK: error: invalid operand
+#CHECK: wftcixb %v0, %v0, -1
+#CHECK: error: invalid operand
+#CHECK: wftcixb %v0, %v0, 4096
+
+ wftcixb %v0, %v0, -1
+ wftcixb %v0, %v0, 4096
+
diff --git a/test/MC/SystemZ/insn-good-z14.s b/test/MC/SystemZ/insn-good-z14.s
new file mode 100644
index 0000000000000..1fcdcb4ccab08
--- /dev/null
+++ b/test/MC/SystemZ/insn-good-z14.s
@@ -0,0 +1,2674 @@
+# For z14 and above.
+# RUN: llvm-mc -triple s390x-linux-gnu -mcpu=z14 -show-encoding %s \
+# RUN: | FileCheck %s
+# RUN: llvm-mc -triple s390x-linux-gnu -mcpu=arch12 -show-encoding %s \
+# RUN: | FileCheck %s
+
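+# This file drives llvm-mc with -show-encoding, so each "#CHECK:" line pins
+# down the exact byte encoding expected for the statement assembled in the
+# matching group below it.
+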
+#CHECK: agh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x38]
+#CHECK: agh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x38]
+#CHECK: agh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x38]
+#CHECK: agh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x38]
+#CHECK: agh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x38]
+#CHECK: agh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x38]
+#CHECK: agh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x38]
+#CHECK: agh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x38]
+#CHECK: agh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x38]
+#CHECK: agh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x38]
+
+ agh %r0, -524288
+ agh %r0, -1
+ agh %r0, 0
+ agh %r0, 1
+ agh %r0, 524287
+ agh %r0, 0(%r1)
+ agh %r0, 0(%r15)
+ agh %r0, 524287(%r1,%r15)
+ agh %r0, 524287(%r15,%r1)
+ agh %r15, 0
+
+#CHECK: bi -524288 # encoding: [0xe3,0xf0,0x00,0x00,0x80,0x47]
+#CHECK: bi -1 # encoding: [0xe3,0xf0,0x0f,0xff,0xff,0x47]
+#CHECK: bi 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x47]
+#CHECK: bi 1 # encoding: [0xe3,0xf0,0x00,0x01,0x00,0x47]
+#CHECK: bi 524287 # encoding: [0xe3,0xf0,0x0f,0xff,0x7f,0x47]
+#CHECK: bi 0(%r1) # encoding: [0xe3,0xf0,0x10,0x00,0x00,0x47]
+#CHECK: bi 0(%r15) # encoding: [0xe3,0xf0,0xf0,0x00,0x00,0x47]
+#CHECK: bi 524287(%r1,%r15) # encoding: [0xe3,0xf1,0xff,0xff,0x7f,0x47]
+#CHECK: bi 524287(%r15,%r1) # encoding: [0xe3,0xff,0x1f,0xff,0x7f,0x47]
+
+ bi -524288
+ bi -1
+ bi 0
+ bi 1
+ bi 524287
+ bi 0(%r1)
+ bi 0(%r15)
+ bi 524287(%r1,%r15)
+ bi 524287(%r15,%r1)
+
+#CHECK: bic 0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x47]
+#CHECK: bic 0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x47]
+#CHECK: bic 0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x47]
+#CHECK: bic 0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x47]
+#CHECK: bic 0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x47]
+#CHECK: bic 0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x47]
+#CHECK: bic 0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x47]
+#CHECK: bic 0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x47]
+#CHECK: bic 0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x47]
+#CHECK: bic 15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x47]
+
+ bic 0, -524288
+ bic 0, -1
+ bic 0, 0
+ bic 0, 1
+ bic 0, 524287
+ bic 0, 0(%r1)
+ bic 0, 0(%r15)
+ bic 0, 524287(%r1,%r15)
+ bic 0, 524287(%r15,%r1)
+ bic 15, 0
+
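+# The pairs below cover the extended mnemonics of bic: each alias (bio, bih,
+# ..., bino) encodes the same condition-code mask in the M1 field as the
+# spelled-out bic form it is paired with.
+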
+#CHECK: bic 1, 0(%r7) # encoding: [0xe3,0x10,0x70,0x00,0x00,0x47]
+#CHECK: bio 0(%r15) # encoding: [0xe3,0x10,0xf0,0x00,0x00,0x47]
+
+ bic 1, 0(%r7)
+ bio 0(%r15)
+
+#CHECK: bic 2, 0(%r7) # encoding: [0xe3,0x20,0x70,0x00,0x00,0x47]
+#CHECK: bih 0(%r15) # encoding: [0xe3,0x20,0xf0,0x00,0x00,0x47]
+
+ bic 2, 0(%r7)
+ bih 0(%r15)
+
+#CHECK: bic 3, 0(%r7) # encoding: [0xe3,0x30,0x70,0x00,0x00,0x47]
+#CHECK: binle 0(%r15) # encoding: [0xe3,0x30,0xf0,0x00,0x00,0x47]
+
+ bic 3, 0(%r7)
+ binle 0(%r15)
+
+#CHECK: bic 4, 0(%r7) # encoding: [0xe3,0x40,0x70,0x00,0x00,0x47]
+#CHECK: bil 0(%r15) # encoding: [0xe3,0x40,0xf0,0x00,0x00,0x47]
+
+ bic 4, 0(%r7)
+ bil 0(%r15)
+
+#CHECK: bic 5, 0(%r7) # encoding: [0xe3,0x50,0x70,0x00,0x00,0x47]
+#CHECK: binhe 0(%r15) # encoding: [0xe3,0x50,0xf0,0x00,0x00,0x47]
+
+ bic 5, 0(%r7)
+ binhe 0(%r15)
+
+#CHECK: bic 6, 0(%r7) # encoding: [0xe3,0x60,0x70,0x00,0x00,0x47]
+#CHECK: bilh 0(%r15) # encoding: [0xe3,0x60,0xf0,0x00,0x00,0x47]
+
+ bic 6, 0(%r7)
+ bilh 0(%r15)
+
+#CHECK: bic 7, 0(%r7) # encoding: [0xe3,0x70,0x70,0x00,0x00,0x47]
+#CHECK: bine 0(%r15) # encoding: [0xe3,0x70,0xf0,0x00,0x00,0x47]
+
+ bic 7, 0(%r7)
+ bine 0(%r15)
+
+#CHECK: bic 8, 0(%r7) # encoding: [0xe3,0x80,0x70,0x00,0x00,0x47]
+#CHECK: bie 0(%r15) # encoding: [0xe3,0x80,0xf0,0x00,0x00,0x47]
+
+ bic 8, 0(%r7)
+ bie 0(%r15)
+
+#CHECK: bic 9, 0(%r7) # encoding: [0xe3,0x90,0x70,0x00,0x00,0x47]
+#CHECK: binlh 0(%r15) # encoding: [0xe3,0x90,0xf0,0x00,0x00,0x47]
+
+ bic 9, 0(%r7)
+ binlh 0(%r15)
+
+#CHECK: bic 10, 0(%r7) # encoding: [0xe3,0xa0,0x70,0x00,0x00,0x47]
+#CHECK: bihe 0(%r15) # encoding: [0xe3,0xa0,0xf0,0x00,0x00,0x47]
+
+ bic 10, 0(%r7)
+ bihe 0(%r15)
+
+#CHECK: bic 11, 0(%r7) # encoding: [0xe3,0xb0,0x70,0x00,0x00,0x47]
+#CHECK: binl 0(%r15) # encoding: [0xe3,0xb0,0xf0,0x00,0x00,0x47]
+
+ bic 11, 0(%r7)
+ binl 0(%r15)
+
+#CHECK: bic 12, 0(%r7) # encoding: [0xe3,0xc0,0x70,0x00,0x00,0x47]
+#CHECK: bile 0(%r15) # encoding: [0xe3,0xc0,0xf0,0x00,0x00,0x47]
+
+ bic 12, 0(%r7)
+ bile 0(%r15)
+
+#CHECK: bic 13, 0(%r7) # encoding: [0xe3,0xd0,0x70,0x00,0x00,0x47]
+#CHECK: binh 0(%r15) # encoding: [0xe3,0xd0,0xf0,0x00,0x00,0x47]
+
+ bic 13, 0(%r7)
+ binh 0(%r15)
+
+#CHECK: bic 14, 0(%r7) # encoding: [0xe3,0xe0,0x70,0x00,0x00,0x47]
+#CHECK: bino 0(%r15) # encoding: [0xe3,0xe0,0xf0,0x00,0x00,0x47]
+
+ bic 14, 0(%r7)
+ bino 0(%r15)
+
+#CHECK: irbm %r0, %r0 # encoding: [0xb9,0xac,0x00,0x00]
+#CHECK: irbm %r0, %r15 # encoding: [0xb9,0xac,0x00,0x0f]
+#CHECK: irbm %r15, %r0 # encoding: [0xb9,0xac,0x00,0xf0]
+#CHECK: irbm %r7, %r8 # encoding: [0xb9,0xac,0x00,0x78]
+#CHECK: irbm %r15, %r15 # encoding: [0xb9,0xac,0x00,0xff]
+
+ irbm %r0,%r0
+ irbm %r0,%r15
+ irbm %r15,%r0
+ irbm %r7,%r8
+ irbm %r15,%r15
+
+#CHECK: kma %r2, %r2, %r2 # encoding: [0xb9,0x29,0x20,0x22]
+#CHECK: kma %r2, %r8, %r14 # encoding: [0xb9,0x29,0x80,0x2e]
+#CHECK: kma %r14, %r8, %r2 # encoding: [0xb9,0x29,0x80,0xe2]
+#CHECK: kma %r6, %r8, %r10 # encoding: [0xb9,0x29,0x80,0x6a]
+
+ kma %r2, %r2, %r2
+ kma %r2, %r8, %r14
+ kma %r14, %r8, %r2
+ kma %r6, %r8, %r10
+
+#CHECK: lgg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x4c]
+#CHECK: lgg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x4c]
+#CHECK: lgg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x4c]
+#CHECK: lgg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x4c]
+#CHECK: lgg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x4c]
+#CHECK: lgg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x4c]
+#CHECK: lgg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x4c]
+#CHECK: lgg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x4c]
+#CHECK: lgg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x4c]
+#CHECK: lgg %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x4c]
+
+ lgg %r0, -524288
+ lgg %r0, -1
+ lgg %r0, 0
+ lgg %r0, 1
+ lgg %r0, 524287
+ lgg %r0, 0(%r1)
+ lgg %r0, 0(%r15)
+ lgg %r0, 524287(%r1,%r15)
+ lgg %r0, 524287(%r15,%r1)
+ lgg %r15, 0
+
+#CHECK: lgsc %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x4d]
+#CHECK: lgsc %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x4d]
+#CHECK: lgsc %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x4d]
+#CHECK: lgsc %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x4d]
+#CHECK: lgsc %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x4d]
+#CHECK: lgsc %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x4d]
+#CHECK: lgsc %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x4d]
+#CHECK: lgsc %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x4d]
+#CHECK: lgsc %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x4d]
+
+ lgsc %r0, -524288
+ lgsc %r0, -1
+ lgsc %r0, 0
+ lgsc %r0, 1
+ lgsc %r0, 524287
+ lgsc %r0, 0(%r1)
+ lgsc %r0, 0(%r15)
+ lgsc %r0, 524287(%r1,%r15)
+ lgsc %r0, 524287(%r15,%r1)
+
+#CHECK: llgfsg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x48]
+#CHECK: llgfsg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x48]
+#CHECK: llgfsg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x48]
+#CHECK: llgfsg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x48]
+#CHECK: llgfsg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x48]
+#CHECK: llgfsg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x48]
+#CHECK: llgfsg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x48]
+#CHECK: llgfsg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x48]
+#CHECK: llgfsg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x48]
+#CHECK: llgfsg %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x48]
+
+ llgfsg %r0, -524288
+ llgfsg %r0, -1
+ llgfsg %r0, 0
+ llgfsg %r0, 1
+ llgfsg %r0, 524287
+ llgfsg %r0, 0(%r1)
+ llgfsg %r0, 0(%r15)
+ llgfsg %r0, 524287(%r1,%r15)
+ llgfsg %r0, 524287(%r15,%r1)
+ llgfsg %r15, 0
+
+#CHECK: mg %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x84]
+#CHECK: mg %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x84]
+#CHECK: mg %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x84]
+#CHECK: mg %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x84]
+#CHECK: mg %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x84]
+#CHECK: mg %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x84]
+#CHECK: mg %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x84]
+#CHECK: mg %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x84]
+#CHECK: mg %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x84]
+#CHECK: mg %r14, 0 # encoding: [0xe3,0xe0,0x00,0x00,0x00,0x84]
+
+ mg %r0, -524288
+ mg %r0, -1
+ mg %r0, 0
+ mg %r0, 1
+ mg %r0, 524287
+ mg %r0, 0(%r1)
+ mg %r0, 0(%r15)
+ mg %r0, 524287(%r1,%r15)
+ mg %r0, 524287(%r15,%r1)
+ mg %r14, 0
+
+#CHECK: mgh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x3c]
+#CHECK: mgh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x3c]
+#CHECK: mgh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x3c]
+#CHECK: mgh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x3c]
+#CHECK: mgh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x3c]
+#CHECK: mgh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x3c]
+#CHECK: mgh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x3c]
+#CHECK: mgh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x3c]
+#CHECK: mgh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x3c]
+#CHECK: mgh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x3c]
+
+ mgh %r0, -524288
+ mgh %r0, -1
+ mgh %r0, 0
+ mgh %r0, 1
+ mgh %r0, 524287
+ mgh %r0, 0(%r1)
+ mgh %r0, 0(%r15)
+ mgh %r0, 524287(%r1,%r15)
+ mgh %r0, 524287(%r15,%r1)
+ mgh %r15, 0
+
+#CHECK: mgrk %r0, %r0, %r0 # encoding: [0xb9,0xec,0x00,0x00]
+#CHECK: mgrk %r0, %r0, %r15 # encoding: [0xb9,0xec,0xf0,0x00]
+#CHECK: mgrk %r0, %r15, %r0 # encoding: [0xb9,0xec,0x00,0x0f]
+#CHECK: mgrk %r14, %r0, %r0 # encoding: [0xb9,0xec,0x00,0xe0]
+#CHECK: mgrk %r6, %r8, %r9 # encoding: [0xb9,0xec,0x90,0x68]
+
+ mgrk %r0,%r0,%r0
+ mgrk %r0,%r0,%r15
+ mgrk %r0,%r15,%r0
+ mgrk %r14,%r0,%r0
+ mgrk %r6,%r8,%r9
+
+#CHECK: msc %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x53]
+#CHECK: msc %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x53]
+#CHECK: msc %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x53]
+#CHECK: msc %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x53]
+#CHECK: msc %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x53]
+#CHECK: msc %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x53]
+#CHECK: msc %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x53]
+#CHECK: msc %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x53]
+#CHECK: msc %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x53]
+#CHECK: msc %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x53]
+
+ msc %r0, -524288
+ msc %r0, -1
+ msc %r0, 0
+ msc %r0, 1
+ msc %r0, 524287
+ msc %r0, 0(%r1)
+ msc %r0, 0(%r15)
+ msc %r0, 524287(%r1,%r15)
+ msc %r0, 524287(%r15,%r1)
+ msc %r15, 0
+
+#CHECK: msgc %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x83]
+#CHECK: msgc %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x83]
+#CHECK: msgc %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x83]
+#CHECK: msgc %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x83]
+#CHECK: msgc %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x83]
+#CHECK: msgc %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x83]
+#CHECK: msgc %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x83]
+#CHECK: msgc %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x83]
+#CHECK: msgc %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x83]
+#CHECK: msgc %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x83]
+
+ msgc %r0, -524288
+ msgc %r0, -1
+ msgc %r0, 0
+ msgc %r0, 1
+ msgc %r0, 524287
+ msgc %r0, 0(%r1)
+ msgc %r0, 0(%r15)
+ msgc %r0, 524287(%r1,%r15)
+ msgc %r0, 524287(%r15,%r1)
+ msgc %r15, 0
+
+#CHECK: msrkc %r0, %r0, %r0 # encoding: [0xb9,0xfd,0x00,0x00]
+#CHECK: msrkc %r0, %r0, %r15 # encoding: [0xb9,0xfd,0xf0,0x00]
+#CHECK: msrkc %r0, %r15, %r0 # encoding: [0xb9,0xfd,0x00,0x0f]
+#CHECK: msrkc %r15, %r0, %r0 # encoding: [0xb9,0xfd,0x00,0xf0]
+#CHECK: msrkc %r7, %r8, %r9 # encoding: [0xb9,0xfd,0x90,0x78]
+
+ msrkc %r0,%r0,%r0
+ msrkc %r0,%r0,%r15
+ msrkc %r0,%r15,%r0
+ msrkc %r15,%r0,%r0
+ msrkc %r7,%r8,%r9
+
+#CHECK: msgrkc %r0, %r0, %r0 # encoding: [0xb9,0xed,0x00,0x00]
+#CHECK: msgrkc %r0, %r0, %r15 # encoding: [0xb9,0xed,0xf0,0x00]
+#CHECK: msgrkc %r0, %r15, %r0 # encoding: [0xb9,0xed,0x00,0x0f]
+#CHECK: msgrkc %r15, %r0, %r0 # encoding: [0xb9,0xed,0x00,0xf0]
+#CHECK: msgrkc %r7, %r8, %r9 # encoding: [0xb9,0xed,0x90,0x78]
+
+ msgrkc %r0,%r0,%r0
+ msgrkc %r0,%r0,%r15
+ msgrkc %r0,%r15,%r0
+ msgrkc %r15,%r0,%r0
+ msgrkc %r7,%r8,%r9
+
+#CHECK: prno %r2, %r2 # encoding: [0xb9,0x3c,0x00,0x22]
+#CHECK: prno %r2, %r14 # encoding: [0xb9,0x3c,0x00,0x2e]
+#CHECK: prno %r14, %r2 # encoding: [0xb9,0x3c,0x00,0xe2]
+#CHECK: prno %r6, %r10 # encoding: [0xb9,0x3c,0x00,0x6a]
+
+ prno %r2, %r2
+ prno %r2, %r14
+ prno %r14, %r2
+ prno %r6, %r10
+
+#CHECK: sgh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x39]
+#CHECK: sgh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x39]
+#CHECK: sgh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x39]
+#CHECK: sgh %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x39]
+#CHECK: sgh %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x39]
+#CHECK: sgh %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x39]
+#CHECK: sgh %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x39]
+#CHECK: sgh %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x39]
+#CHECK: sgh %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x39]
+#CHECK: sgh %r15, 0 # encoding: [0xe3,0xf0,0x00,0x00,0x00,0x39]
+
+ sgh %r0, -524288
+ sgh %r0, -1
+ sgh %r0, 0
+ sgh %r0, 1
+ sgh %r0, 524287
+ sgh %r0, 0(%r1)
+ sgh %r0, 0(%r15)
+ sgh %r0, 524287(%r1,%r15)
+ sgh %r0, 524287(%r15,%r1)
+ sgh %r15, 0
+
+#CHECK: stgsc %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0x49]
+#CHECK: stgsc %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0x49]
+#CHECK: stgsc %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0x49]
+#CHECK: stgsc %r0, 1 # encoding: [0xe3,0x00,0x00,0x01,0x00,0x49]
+#CHECK: stgsc %r0, 524287 # encoding: [0xe3,0x00,0x0f,0xff,0x7f,0x49]
+#CHECK: stgsc %r0, 0(%r1) # encoding: [0xe3,0x00,0x10,0x00,0x00,0x49]
+#CHECK: stgsc %r0, 0(%r15) # encoding: [0xe3,0x00,0xf0,0x00,0x00,0x49]
+#CHECK: stgsc %r0, 524287(%r1,%r15) # encoding: [0xe3,0x01,0xff,0xff,0x7f,0x49]
+#CHECK: stgsc %r0, 524287(%r15,%r1) # encoding: [0xe3,0x0f,0x1f,0xff,0x7f,0x49]
+
+ stgsc %r0, -524288
+ stgsc %r0, -1
+ stgsc %r0, 0
+ stgsc %r0, 1
+ stgsc %r0, 524287
+ stgsc %r0, 0(%r1)
+ stgsc %r0, 0(%r15)
+ stgsc %r0, 524287(%r1,%r15)
+ stgsc %r0, 524287(%r15,%r1)
+
+#CHECK: vap %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x71]
+#CHECK: vap %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x71]
+#CHECK: vap %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x71]
+#CHECK: vap %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x71]
+#CHECK: vap %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x71]
+#CHECK: vap %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x71]
+#CHECK: vap %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x71]
+
+ vap %v0, %v0, %v0, 0, 0
+ vap %v0, %v0, %v0, 0, 15
+ vap %v0, %v0, %v0, 255, 0
+ vap %v0, %v0, %v31, 0, 0
+ vap %v0, %v31, %v0, 0, 0
+ vap %v31, %v0, %v0, 0, 0
+ vap %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vbperm %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x85]
+#CHECK: vbperm %v0, %v0, %v15 # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x85]
+#CHECK: vbperm %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x85]
+#CHECK: vbperm %v0, %v15, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x85]
+#CHECK: vbperm %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x85]
+#CHECK: vbperm %v15, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x85]
+#CHECK: vbperm %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x85]
+#CHECK: vbperm %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x85]
+
+ vbperm %v0, %v0, %v0
+ vbperm %v0, %v0, %v15
+ vbperm %v0, %v0, %v31
+ vbperm %v0, %v15, %v0
+ vbperm %v0, %v31, %v0
+ vbperm %v15, %v0, %v0
+ vbperm %v31, %v0, %v0
+ vbperm %v18, %v3, %v20
+
+#CHECK: vcp %v0, %v0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x77]
+#CHECK: vcp %v0, %v0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x77]
+#CHECK: vcp %v15, %v0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x77]
+#CHECK: vcp %v31, %v0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x77]
+#CHECK: vcp %v0, %v15, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x77]
+#CHECK: vcp %v0, %v31, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x77]
+#CHECK: vcp %v3, %v18, 4 # encoding: [0xe6,0x03,0x20,0x40,0x02,0x77]
+
+ vcp %v0, %v0, 0
+ vcp %v0, %v0, 15
+ vcp %v15, %v0, 0
+ vcp %v31, %v0, 0
+ vcp %v0, %v15, 0
+ vcp %v0, %v31, 0
+ vcp %v3, %v18, 4
+
+#CHECK: vcvb %r0, %v0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x50]
+#CHECK: vcvb %r0, %v0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x50]
+#CHECK: vcvb %r15, %v0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x00,0x50]
+#CHECK: vcvb %r0, %v15, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x50]
+#CHECK: vcvb %r0, %v31, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x50]
+#CHECK: vcvb %r3, %v18, 4 # encoding: [0xe6,0x32,0x00,0x40,0x04,0x50]
+
+ vcvb %r0, %v0, 0
+ vcvb %r0, %v0, 15
+ vcvb %r15, %v0, 0
+ vcvb %r0, %v15, 0
+ vcvb %r0, %v31, 0
+ vcvb %r3, %v18, 4
+
+#CHECK: vcvbg %r0, %v0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x52]
+#CHECK: vcvbg %r0, %v0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x52]
+#CHECK: vcvbg %r15, %v0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x00,0x52]
+#CHECK: vcvbg %r0, %v15, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x52]
+#CHECK: vcvbg %r0, %v31, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x52]
+#CHECK: vcvbg %r3, %v18, 4 # encoding: [0xe6,0x32,0x00,0x40,0x04,0x52]
+
+ vcvbg %r0, %v0, 0
+ vcvbg %r0, %v0, 15
+ vcvbg %r15, %v0, 0
+ vcvbg %r0, %v15, 0
+ vcvbg %r0, %v31, 0
+ vcvbg %r3, %v18, 4
+
+#CHECK: vcvd %v0, %r0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x58]
+#CHECK: vcvd %v0, %r0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x58]
+#CHECK: vcvd %v0, %r0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x58]
+#CHECK: vcvd %v0, %r15, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x58]
+#CHECK: vcvd %v15, %r0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x00,0x58]
+#CHECK: vcvd %v31, %r0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x58]
+#CHECK: vcvd %v18, %r9, 52, 11 # encoding: [0xe6,0x29,0x00,0xb3,0x48,0x58]
+
+ vcvd %v0, %r0, 0, 0
+ vcvd %v0, %r0, 0, 15
+ vcvd %v0, %r0, 255, 0
+ vcvd %v0, %r15, 0, 0
+ vcvd %v15, %r0, 0, 0
+ vcvd %v31, %r0, 0, 0
+ vcvd %v18, %r9, 0x34, 11
+
+#CHECK: vcvdg %v0, %r0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x5a]
+#CHECK: vcvdg %v0, %r0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x5a]
+#CHECK: vcvdg %v0, %r0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x5a]
+#CHECK: vcvdg %v0, %r15, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x5a]
+#CHECK: vcvdg %v15, %r0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x00,0x5a]
+#CHECK: vcvdg %v31, %r0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x5a]
+#CHECK: vcvdg %v18, %r9, 52, 11 # encoding: [0xe6,0x29,0x00,0xb3,0x48,0x5a]
+
+ vcvdg %v0, %r0, 0, 0
+ vcvdg %v0, %r0, 0, 15
+ vcvdg %v0, %r0, 255, 0
+ vcvdg %v0, %r15, 0, 0
+ vcvdg %v15, %r0, 0, 0
+ vcvdg %v31, %r0, 0, 0
+ vcvdg %v18, %r9, 0x34, 11
+
+#CHECK: vdp %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x7a]
+#CHECK: vdp %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x7a]
+#CHECK: vdp %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x7a]
+#CHECK: vdp %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x7a]
+#CHECK: vdp %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x7a]
+#CHECK: vdp %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x7a]
+#CHECK: vdp %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x7a]
+
+ vdp %v0, %v0, %v0, 0, 0
+ vdp %v0, %v0, %v0, 0, 15
+ vdp %v0, %v0, %v0, 255, 0
+ vdp %v0, %v0, %v31, 0, 0
+ vdp %v0, %v31, %v0, 0, 0
+ vdp %v31, %v0, %v0, 0, 0
+ vdp %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vfasb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xe3]
+#CHECK: vfasb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xe3]
+#CHECK: vfasb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xe3]
+#CHECK: vfasb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xe3]
+#CHECK: vfasb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xe3]
+
+ vfasb %v0, %v0, %v0
+ vfasb %v0, %v0, %v31
+ vfasb %v0, %v31, %v0
+ vfasb %v31, %v0, %v0
+ vfasb %v18, %v3, %v20
+
+#CHECK: vfcesb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xe8]
+#CHECK: vfcesb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xe8]
+#CHECK: vfcesb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xe8]
+#CHECK: vfcesb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xe8]
+#CHECK: vfcesb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xe8]
+
+ vfcesb %v0, %v0, %v0
+ vfcesb %v0, %v0, %v31
+ vfcesb %v0, %v31, %v0
+ vfcesb %v31, %v0, %v0
+ vfcesb %v18, %v3, %v20
+
+#CHECK: vfcesbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x10,0x20,0xe8]
+#CHECK: vfcesbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x10,0x22,0xe8]
+#CHECK: vfcesbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x10,0x24,0xe8]
+#CHECK: vfcesbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x10,0x28,0xe8]
+#CHECK: vfcesbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x10,0x2a,0xe8]
+
+ vfcesbs %v0, %v0, %v0
+ vfcesbs %v0, %v0, %v31
+ vfcesbs %v0, %v31, %v0
+ vfcesbs %v31, %v0, %v0
+ vfcesbs %v18, %v3, %v20
+
+#CHECK: vfchsb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xeb]
+#CHECK: vfchsb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xeb]
+#CHECK: vfchsb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xeb]
+#CHECK: vfchsb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xeb]
+#CHECK: vfchsb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xeb]
+
+ vfchsb %v0, %v0, %v0
+ vfchsb %v0, %v0, %v31
+ vfchsb %v0, %v31, %v0
+ vfchsb %v31, %v0, %v0
+ vfchsb %v18, %v3, %v20
+
+#CHECK: vfchsbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x10,0x20,0xeb]
+#CHECK: vfchsbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x10,0x22,0xeb]
+#CHECK: vfchsbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x10,0x24,0xeb]
+#CHECK: vfchsbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x10,0x28,0xeb]
+#CHECK: vfchsbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x10,0x2a,0xeb]
+
+ vfchsbs %v0, %v0, %v0
+ vfchsbs %v0, %v0, %v31
+ vfchsbs %v0, %v31, %v0
+ vfchsbs %v31, %v0, %v0
+ vfchsbs %v18, %v3, %v20
+
+#CHECK: vfchesb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xea]
+#CHECK: vfchesb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xea]
+#CHECK: vfchesb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xea]
+#CHECK: vfchesb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xea]
+#CHECK: vfchesb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xea]
+
+ vfchesb %v0, %v0, %v0
+ vfchesb %v0, %v0, %v31
+ vfchesb %v0, %v31, %v0
+ vfchesb %v31, %v0, %v0
+ vfchesb %v18, %v3, %v20
+
+#CHECK: vfchesbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x10,0x20,0xea]
+#CHECK: vfchesbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x10,0x22,0xea]
+#CHECK: vfchesbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x10,0x24,0xea]
+#CHECK: vfchesbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x10,0x28,0xea]
+#CHECK: vfchesbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x10,0x2a,0xea]
+
+ vfchesbs %v0, %v0, %v0
+ vfchesbs %v0, %v0, %v31
+ vfchesbs %v0, %v31, %v0
+ vfchesbs %v31, %v0, %v0
+ vfchesbs %v18, %v3, %v20
+
+#CHECK: vfdsb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xe5]
+#CHECK: vfdsb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xe5]
+#CHECK: vfdsb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xe5]
+#CHECK: vfdsb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xe5]
+#CHECK: vfdsb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xe5]
+
+ vfdsb %v0, %v0, %v0
+ vfdsb %v0, %v0, %v31
+ vfdsb %v0, %v31, %v0
+ vfdsb %v31, %v0, %v0
+ vfdsb %v18, %v3, %v20
+
+#CHECK: vfisb %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xc7]
+#CHECK: vfisb %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x20,0xc7]
+#CHECK: vfisb %v0, %v0, 4, 0 # encoding: [0xe7,0x00,0x00,0x04,0x20,0xc7]
+#CHECK: vfisb %v0, %v0, 7, 0 # encoding: [0xe7,0x00,0x00,0x07,0x20,0xc7]
+#CHECK: vfisb %v0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xc7]
+#CHECK: vfisb %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xc7]
+#CHECK: vfisb %v14, %v17, 4, 10 # encoding: [0xe7,0xe1,0x00,0xa4,0x24,0xc7]
+
+ vfisb %v0, %v0, 0, 0
+ vfisb %v0, %v0, 0, 15
+ vfisb %v0, %v0, 4, 0
+ vfisb %v0, %v0, 7, 0
+ vfisb %v0, %v31, 0, 0
+ vfisb %v31, %v0, 0, 0
+ vfisb %v14, %v17, 4, 10
+
+#CHECK: vfkedb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x04,0x30,0xe8]
+#CHECK: vfkedb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x04,0x32,0xe8]
+#CHECK: vfkedb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x04,0x34,0xe8]
+#CHECK: vfkedb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x04,0x38,0xe8]
+#CHECK: vfkedb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x04,0x3a,0xe8]
+
+ vfkedb %v0, %v0, %v0
+ vfkedb %v0, %v0, %v31
+ vfkedb %v0, %v31, %v0
+ vfkedb %v31, %v0, %v0
+ vfkedb %v18, %v3, %v20
+
+#CHECK: vfkedbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x14,0x30,0xe8]
+#CHECK: vfkedbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x14,0x32,0xe8]
+#CHECK: vfkedbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x14,0x34,0xe8]
+#CHECK: vfkedbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x14,0x38,0xe8]
+#CHECK: vfkedbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x14,0x3a,0xe8]
+
+ vfkedbs %v0, %v0, %v0
+ vfkedbs %v0, %v0, %v31
+ vfkedbs %v0, %v31, %v0
+ vfkedbs %v31, %v0, %v0
+ vfkedbs %v18, %v3, %v20
+
+#CHECK: vfkesb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x04,0x20,0xe8]
+#CHECK: vfkesb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x04,0x22,0xe8]
+#CHECK: vfkesb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x04,0x24,0xe8]
+#CHECK: vfkesb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x04,0x28,0xe8]
+#CHECK: vfkesb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x04,0x2a,0xe8]
+
+ vfkesb %v0, %v0, %v0
+ vfkesb %v0, %v0, %v31
+ vfkesb %v0, %v31, %v0
+ vfkesb %v31, %v0, %v0
+ vfkesb %v18, %v3, %v20
+
+#CHECK: vfkesbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x14,0x20,0xe8]
+#CHECK: vfkesbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x14,0x22,0xe8]
+#CHECK: vfkesbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x14,0x24,0xe8]
+#CHECK: vfkesbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x14,0x28,0xe8]
+#CHECK: vfkesbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x14,0x2a,0xe8]
+
+ vfkesbs %v0, %v0, %v0
+ vfkesbs %v0, %v0, %v31
+ vfkesbs %v0, %v31, %v0
+ vfkesbs %v31, %v0, %v0
+ vfkesbs %v18, %v3, %v20
+
+#CHECK: vfkhdb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x04,0x30,0xeb]
+#CHECK: vfkhdb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x04,0x32,0xeb]
+#CHECK: vfkhdb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x04,0x34,0xeb]
+#CHECK: vfkhdb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x04,0x38,0xeb]
+#CHECK: vfkhdb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x04,0x3a,0xeb]
+
+ vfkhdb %v0, %v0, %v0
+ vfkhdb %v0, %v0, %v31
+ vfkhdb %v0, %v31, %v0
+ vfkhdb %v31, %v0, %v0
+ vfkhdb %v18, %v3, %v20
+
+#CHECK: vfkhdbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x14,0x30,0xeb]
+#CHECK: vfkhdbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x14,0x32,0xeb]
+#CHECK: vfkhdbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x14,0x34,0xeb]
+#CHECK: vfkhdbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x14,0x38,0xeb]
+#CHECK: vfkhdbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x14,0x3a,0xeb]
+
+ vfkhdbs %v0, %v0, %v0
+ vfkhdbs %v0, %v0, %v31
+ vfkhdbs %v0, %v31, %v0
+ vfkhdbs %v31, %v0, %v0
+ vfkhdbs %v18, %v3, %v20
+
+#CHECK: vfkhsb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x04,0x20,0xeb]
+#CHECK: vfkhsb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x04,0x22,0xeb]
+#CHECK: vfkhsb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x04,0x24,0xeb]
+#CHECK: vfkhsb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x04,0x28,0xeb]
+#CHECK: vfkhsb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x04,0x2a,0xeb]
+
+ vfkhsb %v0, %v0, %v0
+ vfkhsb %v0, %v0, %v31
+ vfkhsb %v0, %v31, %v0
+ vfkhsb %v31, %v0, %v0
+ vfkhsb %v18, %v3, %v20
+
+#CHECK: vfkhsbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x14,0x20,0xeb]
+#CHECK: vfkhsbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x14,0x22,0xeb]
+#CHECK: vfkhsbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x14,0x24,0xeb]
+#CHECK: vfkhsbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x14,0x28,0xeb]
+#CHECK: vfkhsbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x14,0x2a,0xeb]
+
+ vfkhsbs %v0, %v0, %v0
+ vfkhsbs %v0, %v0, %v31
+ vfkhsbs %v0, %v31, %v0
+ vfkhsbs %v31, %v0, %v0
+ vfkhsbs %v18, %v3, %v20
+
+#CHECK: vfkhedb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x04,0x30,0xea]
+#CHECK: vfkhedb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x04,0x32,0xea]
+#CHECK: vfkhedb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x04,0x34,0xea]
+#CHECK: vfkhedb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x04,0x38,0xea]
+#CHECK: vfkhedb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x04,0x3a,0xea]
+
+ vfkhedb %v0, %v0, %v0
+ vfkhedb %v0, %v0, %v31
+ vfkhedb %v0, %v31, %v0
+ vfkhedb %v31, %v0, %v0
+ vfkhedb %v18, %v3, %v20
+
+#CHECK: vfkhedbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x14,0x30,0xea]
+#CHECK: vfkhedbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x14,0x32,0xea]
+#CHECK: vfkhedbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x14,0x34,0xea]
+#CHECK: vfkhedbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x14,0x38,0xea]
+#CHECK: vfkhedbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x14,0x3a,0xea]
+
+ vfkhedbs %v0, %v0, %v0
+ vfkhedbs %v0, %v0, %v31
+ vfkhedbs %v0, %v31, %v0
+ vfkhedbs %v31, %v0, %v0
+ vfkhedbs %v18, %v3, %v20
+
+#CHECK: vfkhesb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x04,0x20,0xea]
+#CHECK: vfkhesb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x04,0x22,0xea]
+#CHECK: vfkhesb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x04,0x24,0xea]
+#CHECK: vfkhesb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x04,0x28,0xea]
+#CHECK: vfkhesb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x04,0x2a,0xea]
+
+ vfkhesb %v0, %v0, %v0
+ vfkhesb %v0, %v0, %v31
+ vfkhesb %v0, %v31, %v0
+ vfkhesb %v31, %v0, %v0
+ vfkhesb %v18, %v3, %v20
+
+#CHECK: vfkhesbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x14,0x20,0xea]
+#CHECK: vfkhesbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x14,0x22,0xea]
+#CHECK: vfkhesbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x14,0x24,0xea]
+#CHECK: vfkhesbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x14,0x28,0xea]
+#CHECK: vfkhesbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x14,0x2a,0xea]
+
+ vfkhesbs %v0, %v0, %v0
+ vfkhesbs %v0, %v0, %v31
+ vfkhesbs %v0, %v31, %v0
+ vfkhesbs %v31, %v0, %v0
+ vfkhesbs %v18, %v3, %v20
+
+#CHECK: vfpsosb %v0, %v0, 3 # encoding: [0xe7,0x00,0x00,0x30,0x20,0xcc]
+#CHECK: vfpsosb %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x20,0xcc]
+#CHECK: vfpsosb %v0, %v15, 3 # encoding: [0xe7,0x0f,0x00,0x30,0x20,0xcc]
+#CHECK: vfpsosb %v0, %v31, 3 # encoding: [0xe7,0x0f,0x00,0x30,0x24,0xcc]
+#CHECK: vfpsosb %v15, %v0, 3 # encoding: [0xe7,0xf0,0x00,0x30,0x20,0xcc]
+#CHECK: vfpsosb %v31, %v0, 3 # encoding: [0xe7,0xf0,0x00,0x30,0x28,0xcc]
+#CHECK: vfpsosb %v14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x70,0x24,0xcc]
+
+ vfpsosb %v0, %v0, 3
+ vfpsosb %v0, %v0, 15
+ vfpsosb %v0, %v15, 3
+ vfpsosb %v0, %v31, 3
+ vfpsosb %v15, %v0, 3
+ vfpsosb %v31, %v0, 3
+ vfpsosb %v14, %v17, 7
+
+#CHECK: vflcsb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xcc]
+#CHECK: vflcsb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xcc]
+#CHECK: vflcsb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xcc]
+#CHECK: vflcsb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xcc]
+#CHECK: vflcsb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xcc]
+#CHECK: vflcsb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xcc]
+
+ vflcsb %v0, %v0
+ vflcsb %v0, %v15
+ vflcsb %v0, %v31
+ vflcsb %v15, %v0
+ vflcsb %v31, %v0
+ vflcsb %v14, %v17
+
+#CHECK: vflnsb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x10,0x20,0xcc]
+#CHECK: vflnsb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x10,0x20,0xcc]
+#CHECK: vflnsb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x10,0x24,0xcc]
+#CHECK: vflnsb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x10,0x20,0xcc]
+#CHECK: vflnsb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x10,0x28,0xcc]
+#CHECK: vflnsb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x10,0x24,0xcc]
+
+ vflnsb %v0, %v0
+ vflnsb %v0, %v15
+ vflnsb %v0, %v31
+ vflnsb %v15, %v0
+ vflnsb %v31, %v0
+ vflnsb %v14, %v17
+
+#CHECK: vflpsb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x20,0x20,0xcc]
+#CHECK: vflpsb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x20,0x20,0xcc]
+#CHECK: vflpsb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x20,0x24,0xcc]
+#CHECK: vflpsb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x20,0x20,0xcc]
+#CHECK: vflpsb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x20,0x28,0xcc]
+#CHECK: vflpsb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x20,0x24,0xcc]
+
+ vflpsb %v0, %v0
+ vflpsb %v0, %v15
+ vflpsb %v0, %v31
+ vflpsb %v15, %v0
+ vflpsb %v31, %v0
+ vflpsb %v14, %v17
+
+#CHECK: vfll %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xc4]
+#CHECK: vfll %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xc4]
+#CHECK: vfll %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xc4]
+#CHECK: vfll %v0, %v15, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xc4]
+#CHECK: vfll %v0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xc4]
+#CHECK: vfll %v15, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xc4]
+#CHECK: vfll %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xc4]
+#CHECK: vfll %v14, %v17, 11, 9 # encoding: [0xe7,0xe1,0x00,0x09,0xb4,0xc4]
+
+ vfll %v0, %v0, 0, 0
+ vfll %v0, %v0, 15, 0
+ vfll %v0, %v0, 0, 15
+ vfll %v0, %v15, 0, 0
+ vfll %v0, %v31, 0, 0
+ vfll %v15, %v0, 0, 0
+ vfll %v31, %v0, 0, 0
+ vfll %v14, %v17, 11, 9
+
+#CHECK: vflls %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xc4]
+#CHECK: vflls %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xc4]
+#CHECK: vflls %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xc4]
+#CHECK: vflls %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xc4]
+#CHECK: vflls %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xc4]
+#CHECK: vflls %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xc4]
+
+ vflls %v0, %v0
+ vflls %v0, %v15
+ vflls %v0, %v31
+ vflls %v15, %v0
+ vflls %v31, %v0
+ vflls %v14, %v17
+
+#CHECK: vflr %v0, %v0, 0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xc5]
+#CHECK: vflr %v0, %v0, 15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xc5]
+#CHECK: vflr %v0, %v0, 0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x00,0xc5]
+#CHECK: vflr %v0, %v0, 0, 4, 0 # encoding: [0xe7,0x00,0x00,0x04,0x00,0xc5]
+#CHECK: vflr %v0, %v0, 0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x00,0xc5]
+#CHECK: vflr %v0, %v31, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xc5]
+#CHECK: vflr %v31, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xc5]
+#CHECK: vflr %v14, %v17, 11, 4, 10 # encoding: [0xe7,0xe1,0x00,0xa4,0xb4,0xc5]
+
+ vflr %v0, %v0, 0, 0, 0
+ vflr %v0, %v0, 15, 0, 0
+ vflr %v0, %v0, 0, 0, 15
+ vflr %v0, %v0, 0, 4, 0
+ vflr %v0, %v0, 0, 12, 0
+ vflr %v0, %v31, 0, 0, 0
+ vflr %v31, %v0, 0, 0, 0
+ vflr %v14, %v17, 11, 4, 10
+
+#CHECK: vflrd %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0xc5]
+#CHECK: vflrd %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf0,0x30,0xc5]
+#CHECK: vflrd %v0, %v0, 4, 0 # encoding: [0xe7,0x00,0x00,0x04,0x30,0xc5]
+#CHECK: vflrd %v0, %v0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc5]
+#CHECK: vflrd %v0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xc5]
+#CHECK: vflrd %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0xc5]
+#CHECK: vflrd %v14, %v17, 4, 10 # encoding: [0xe7,0xe1,0x00,0xa4,0x34,0xc5]
+
+ vflrd %v0, %v0, 0, 0
+ vflrd %v0, %v0, 0, 15
+ vflrd %v0, %v0, 4, 0
+ vflrd %v0, %v0, 12, 0
+ vflrd %v0, %v31, 0, 0
+ vflrd %v31, %v0, 0, 0
+ vflrd %v14, %v17, 4, 10
+
+#CHECK: vfmax %v0, %v0, %v0, 0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xef]
+#CHECK: vfmax %v0, %v0, %v0, 15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xef]
+#CHECK: vfmax %v0, %v0, %v0, 0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xef]
+#CHECK: vfmax %v0, %v0, %v0, 0, 0, 4 # encoding: [0xe7,0x00,0x00,0x40,0x00,0xef]
+#CHECK: vfmax %v0, %v0, %v31, 0, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xef]
+#CHECK: vfmax %v0, %v31, %v0, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xef]
+#CHECK: vfmax %v31, %v0, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xef]
+#CHECK: vfmax %v18, %v3, %v20, 11, 9, 12 # encoding: [0xe7,0x23,0x40,0xc9,0xba,0xef]
+
+ vfmax %v0, %v0, %v0, 0, 0, 0
+ vfmax %v0, %v0, %v0, 15, 0, 0
+ vfmax %v0, %v0, %v0, 0, 15, 0
+ vfmax %v0, %v0, %v0, 0, 0, 4
+ vfmax %v0, %v0, %v31, 0, 0, 0
+ vfmax %v0, %v31, %v0, 0, 0, 0
+ vfmax %v31, %v0, %v0, 0, 0, 0
+ vfmax %v18, %v3, %v20, 11, 9, 12
+
+#CHECK: vfmaxdb %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0xef]
+#CHECK: vfmaxdb %v0, %v0, %v0, 4 # encoding: [0xe7,0x00,0x00,0x40,0x30,0xef]
+#CHECK: vfmaxdb %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0xef]
+#CHECK: vfmaxdb %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xef]
+#CHECK: vfmaxdb %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0xef]
+#CHECK: vfmaxdb %v18, %v3, %v20, 12 # encoding: [0xe7,0x23,0x40,0xc0,0x3a,0xef]
+
+ vfmaxdb %v0, %v0, %v0, 0
+ vfmaxdb %v0, %v0, %v0, 4
+ vfmaxdb %v0, %v0, %v31, 0
+ vfmaxdb %v0, %v31, %v0, 0
+ vfmaxdb %v31, %v0, %v0, 0
+ vfmaxdb %v18, %v3, %v20, 12
+
+#CHECK: vfmaxsb %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xef]
+#CHECK: vfmaxsb %v0, %v0, %v0, 4 # encoding: [0xe7,0x00,0x00,0x40,0x20,0xef]
+#CHECK: vfmaxsb %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xef]
+#CHECK: vfmaxsb %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xef]
+#CHECK: vfmaxsb %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xef]
+#CHECK: vfmaxsb %v18, %v3, %v20, 12 # encoding: [0xe7,0x23,0x40,0xc0,0x2a,0xef]
+
+ vfmaxsb %v0, %v0, %v0, 0
+ vfmaxsb %v0, %v0, %v0, 4
+ vfmaxsb %v0, %v0, %v31, 0
+ vfmaxsb %v0, %v31, %v0, 0
+ vfmaxsb %v31, %v0, %v0, 0
+ vfmaxsb %v18, %v3, %v20, 12
+
+#CHECK: vfmin %v0, %v0, %v0, 0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xee]
+#CHECK: vfmin %v0, %v0, %v0, 15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xee]
+#CHECK: vfmin %v0, %v0, %v0, 0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0xee]
+#CHECK: vfmin %v0, %v0, %v0, 0, 0, 4 # encoding: [0xe7,0x00,0x00,0x40,0x00,0xee]
+#CHECK: vfmin %v0, %v0, %v31, 0, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xee]
+#CHECK: vfmin %v0, %v31, %v0, 0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xee]
+#CHECK: vfmin %v31, %v0, %v0, 0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xee]
+#CHECK: vfmin %v18, %v3, %v20, 11, 9, 12 # encoding: [0xe7,0x23,0x40,0xc9,0xba,0xee]
+
+ vfmin %v0, %v0, %v0, 0, 0, 0
+ vfmin %v0, %v0, %v0, 15, 0, 0
+ vfmin %v0, %v0, %v0, 0, 15, 0
+ vfmin %v0, %v0, %v0, 0, 0, 4
+ vfmin %v0, %v0, %v31, 0, 0, 0
+ vfmin %v0, %v31, %v0, 0, 0, 0
+ vfmin %v31, %v0, %v0, 0, 0, 0
+ vfmin %v18, %v3, %v20, 11, 9, 12
+
+#CHECK: vfmindb %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0xee]
+#CHECK: vfmindb %v0, %v0, %v0, 4 # encoding: [0xe7,0x00,0x00,0x40,0x30,0xee]
+#CHECK: vfmindb %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x32,0xee]
+#CHECK: vfmindb %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0xee]
+#CHECK: vfmindb %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0xee]
+#CHECK: vfmindb %v18, %v3, %v20, 12 # encoding: [0xe7,0x23,0x40,0xc0,0x3a,0xee]
+
+ vfmindb %v0, %v0, %v0, 0
+ vfmindb %v0, %v0, %v0, 4
+ vfmindb %v0, %v0, %v31, 0
+ vfmindb %v0, %v31, %v0, 0
+ vfmindb %v31, %v0, %v0, 0
+ vfmindb %v18, %v3, %v20, 12
+
+#CHECK: vfminsb %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xee]
+#CHECK: vfminsb %v0, %v0, %v0, 4 # encoding: [0xe7,0x00,0x00,0x40,0x20,0xee]
+#CHECK: vfminsb %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xee]
+#CHECK: vfminsb %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xee]
+#CHECK: vfminsb %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xee]
+#CHECK: vfminsb %v18, %v3, %v20, 12 # encoding: [0xe7,0x23,0x40,0xc0,0x2a,0xee]
+
+ vfminsb %v0, %v0, %v0, 0
+ vfminsb %v0, %v0, %v0, 4
+ vfminsb %v0, %v0, %v31, 0
+ vfminsb %v0, %v31, %v0, 0
+ vfminsb %v31, %v0, %v0, 0
+ vfminsb %v18, %v3, %v20, 12
+
+#CHECK: vfmasb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x02,0x00,0x00,0x8f]
+#CHECK: vfmasb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x02,0x00,0xf1,0x8f]
+#CHECK: vfmasb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf2,0x00,0x02,0x8f]
+#CHECK: vfmasb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x02,0x00,0x04,0x8f]
+#CHECK: vfmasb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x02,0x00,0x08,0x8f]
+#CHECK: vfmasb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x00,0x97,0x8f]
+
+ vfmasb %v0, %v0, %v0, %v0
+ vfmasb %v0, %v0, %v0, %v31
+ vfmasb %v0, %v0, %v31, %v0
+ vfmasb %v0, %v31, %v0, %v0
+ vfmasb %v31, %v0, %v0, %v0
+ vfmasb %v13, %v17, %v21, %v25
+
+#CHECK: vfmsb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xe7]
+#CHECK: vfmsb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xe7]
+#CHECK: vfmsb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xe7]
+#CHECK: vfmsb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xe7]
+#CHECK: vfmsb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xe7]
+
+ vfmsb %v0, %v0, %v0
+ vfmsb %v0, %v0, %v31
+ vfmsb %v0, %v31, %v0
+ vfmsb %v31, %v0, %v0
+ vfmsb %v18, %v3, %v20
+
+#CHECK: vfmssb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x02,0x00,0x00,0x8e]
+#CHECK: vfmssb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x02,0x00,0xf1,0x8e]
+#CHECK: vfmssb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf2,0x00,0x02,0x8e]
+#CHECK: vfmssb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x02,0x00,0x04,0x8e]
+#CHECK: vfmssb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x02,0x00,0x08,0x8e]
+#CHECK: vfmssb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x00,0x97,0x8e]
+
+ vfmssb %v0, %v0, %v0, %v0
+ vfmssb %v0, %v0, %v0, %v31
+ vfmssb %v0, %v0, %v31, %v0
+ vfmssb %v0, %v31, %v0, %v0
+ vfmssb %v31, %v0, %v0, %v0
+ vfmssb %v13, %v17, %v21, %v25
+
+#CHECK: vfnma %v0, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x9f]
+#CHECK: vfnma %v0, %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x0f,0x00,0x00,0x9f]
+#CHECK: vfnma %v0, %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0x9f]
+#CHECK: vfnma %v0, %v0, %v0, %v31, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf1,0x9f]
+#CHECK: vfnma %v0, %v0, %v31, %v0, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x9f]
+#CHECK: vfnma %v0, %v31, %v0, %v0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x9f]
+#CHECK: vfnma %v31, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x9f]
+#CHECK: vfnma %v13, %v17, %v21, %v25, 9, 11 # encoding: [0xe7,0xd1,0x5b,0x09,0x97,0x9f]
+
+ vfnma %v0, %v0, %v0, %v0, 0, 0
+ vfnma %v0, %v0, %v0, %v0, 0, 15
+ vfnma %v0, %v0, %v0, %v0, 15, 0
+ vfnma %v0, %v0, %v0, %v31, 0, 0
+ vfnma %v0, %v0, %v31, %v0, 0, 0
+ vfnma %v0, %v31, %v0, %v0, 0, 0
+ vfnma %v31, %v0, %v0, %v0, 0, 0
+ vfnma %v13, %v17, %v21, %v25, 9, 11
+
+#CHECK: vfnmadb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x03,0x00,0x00,0x9f]
+#CHECK: vfnmadb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x03,0x00,0xf1,0x9f]
+#CHECK: vfnmadb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf3,0x00,0x02,0x9f]
+#CHECK: vfnmadb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x03,0x00,0x04,0x9f]
+#CHECK: vfnmadb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x03,0x00,0x08,0x9f]
+#CHECK: vfnmadb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x53,0x00,0x97,0x9f]
+
+ vfnmadb %v0, %v0, %v0, %v0
+ vfnmadb %v0, %v0, %v0, %v31
+ vfnmadb %v0, %v0, %v31, %v0
+ vfnmadb %v0, %v31, %v0, %v0
+ vfnmadb %v31, %v0, %v0, %v0
+ vfnmadb %v13, %v17, %v21, %v25
+
+#CHECK: vfnmasb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x02,0x00,0x00,0x9f]
+#CHECK: vfnmasb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x02,0x00,0xf1,0x9f]
+#CHECK: vfnmasb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf2,0x00,0x02,0x9f]
+#CHECK: vfnmasb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x02,0x00,0x04,0x9f]
+#CHECK: vfnmasb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x02,0x00,0x08,0x9f]
+#CHECK: vfnmasb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x00,0x97,0x9f]
+
+ vfnmasb %v0, %v0, %v0, %v0
+ vfnmasb %v0, %v0, %v0, %v31
+ vfnmasb %v0, %v0, %v31, %v0
+ vfnmasb %v0, %v31, %v0, %v0
+ vfnmasb %v31, %v0, %v0, %v0
+ vfnmasb %v13, %v17, %v21, %v25
+
+#CHECK: vfnms %v0, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x9e]
+#CHECK: vfnms %v0, %v0, %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x0f,0x00,0x00,0x9e]
+#CHECK: vfnms %v0, %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x00,0x9e]
+#CHECK: vfnms %v0, %v0, %v0, %v31, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf1,0x9e]
+#CHECK: vfnms %v0, %v0, %v31, %v0, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x9e]
+#CHECK: vfnms %v0, %v31, %v0, %v0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x9e]
+#CHECK: vfnms %v31, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x9e]
+#CHECK: vfnms %v13, %v17, %v21, %v25, 9, 11 # encoding: [0xe7,0xd1,0x5b,0x09,0x97,0x9e]
+
+ vfnms %v0, %v0, %v0, %v0, 0, 0
+ vfnms %v0, %v0, %v0, %v0, 0, 15
+ vfnms %v0, %v0, %v0, %v0, 15, 0
+ vfnms %v0, %v0, %v0, %v31, 0, 0
+ vfnms %v0, %v0, %v31, %v0, 0, 0
+ vfnms %v0, %v31, %v0, %v0, 0, 0
+ vfnms %v31, %v0, %v0, %v0, 0, 0
+ vfnms %v13, %v17, %v21, %v25, 9, 11
+
+#CHECK: vfnmsdb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x03,0x00,0x00,0x9e]
+#CHECK: vfnmsdb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x03,0x00,0xf1,0x9e]
+#CHECK: vfnmsdb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf3,0x00,0x02,0x9e]
+#CHECK: vfnmsdb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x03,0x00,0x04,0x9e]
+#CHECK: vfnmsdb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x03,0x00,0x08,0x9e]
+#CHECK: vfnmsdb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x53,0x00,0x97,0x9e]
+
+ vfnmsdb %v0, %v0, %v0, %v0
+ vfnmsdb %v0, %v0, %v0, %v31
+ vfnmsdb %v0, %v0, %v31, %v0
+ vfnmsdb %v0, %v31, %v0, %v0
+ vfnmsdb %v31, %v0, %v0, %v0
+ vfnmsdb %v13, %v17, %v21, %v25
+
+#CHECK: vfnmssb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x02,0x00,0x00,0x9e]
+#CHECK: vfnmssb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x02,0x00,0xf1,0x9e]
+#CHECK: vfnmssb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf2,0x00,0x02,0x9e]
+#CHECK: vfnmssb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x02,0x00,0x04,0x9e]
+#CHECK: vfnmssb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x02,0x00,0x08,0x9e]
+#CHECK: vfnmssb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x00,0x97,0x9e]
+
+ vfnmssb %v0, %v0, %v0, %v0
+ vfnmssb %v0, %v0, %v0, %v31
+ vfnmssb %v0, %v0, %v31, %v0
+ vfnmssb %v0, %v31, %v0, %v0
+ vfnmssb %v31, %v0, %v0, %v0
+ vfnmssb %v13, %v17, %v21, %v25
+
+#CHECK: vfssb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xe2]
+#CHECK: vfssb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x22,0xe2]
+#CHECK: vfssb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xe2]
+#CHECK: vfssb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xe2]
+#CHECK: vfssb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x2a,0xe2]
+
+ vfssb %v0, %v0, %v0
+ vfssb %v0, %v0, %v31
+ vfssb %v0, %v31, %v0
+ vfssb %v31, %v0, %v0
+ vfssb %v18, %v3, %v20
+
+#CHECK: vfsqsb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xce]
+#CHECK: vfsqsb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xce]
+#CHECK: vfsqsb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xce]
+#CHECK: vfsqsb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xce]
+#CHECK: vfsqsb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xce]
+#CHECK: vfsqsb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xce]
+
+ vfsqsb %v0, %v0
+ vfsqsb %v0, %v15
+ vfsqsb %v0, %v31
+ vfsqsb %v15, %v0
+ vfsqsb %v31, %v0
+ vfsqsb %v14, %v17
+
+#CHECK: vftcisb %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x4a]
+#CHECK: vftcisb %v0, %v0, 4095 # encoding: [0xe7,0x00,0xff,0xf0,0x20,0x4a]
+#CHECK: vftcisb %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x4a]
+#CHECK: vftcisb %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x4a]
+#CHECK: vftcisb %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x4a]
+#CHECK: vftcisb %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x4a]
+#CHECK: vftcisb %v4, %v21, 1656 # encoding: [0xe7,0x45,0x67,0x80,0x24,0x4a]
+
+ vftcisb %v0, %v0, 0
+ vftcisb %v0, %v0, 4095
+ vftcisb %v0, %v15, 0
+ vftcisb %v0, %v31, 0
+ vftcisb %v15, %v0, 0
+ vftcisb %v31, %v0, 0
+ vftcisb %v4, %v21, 0x678
+
+#CHECK: vlip %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x49]
+#CHECK: vlip %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x49]
+#CHECK: vlip %v0, 65535, 0 # encoding: [0xe6,0x00,0xff,0xff,0x00,0x49]
+#CHECK: vlip %v15, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x00,0x49]
+#CHECK: vlip %v31, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x49]
+#CHECK: vlip %v17, 4660, 7 # encoding: [0xe6,0x10,0x12,0x34,0x78,0x49]
+
+ vlip %v0, 0, 0
+ vlip %v0, 0, 15
+ vlip %v0, 0xffff, 0
+ vlip %v15, 0, 0
+ vlip %v31, 0, 0
+ vlip %v17, 0x1234, 7
+
+#CHECK: vllezlf %v0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x60,0x04]
+#CHECK: vllezlf %v0, 4095 # encoding: [0xe7,0x00,0x0f,0xff,0x60,0x04]
+#CHECK: vllezlf %v0, 0(%r15) # encoding: [0xe7,0x00,0xf0,0x00,0x60,0x04]
+#CHECK: vllezlf %v0, 0(%r15,%r1) # encoding: [0xe7,0x0f,0x10,0x00,0x60,0x04]
+#CHECK: vllezlf %v15, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x60,0x04]
+#CHECK: vllezlf %v31, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x68,0x04]
+#CHECK: vllezlf %v18, 1383(%r3,%r4) # encoding: [0xe7,0x23,0x45,0x67,0x68,0x04]
+
+ vllezlf %v0, 0
+ vllezlf %v0, 4095
+ vllezlf %v0, 0(%r15)
+ vllezlf %v0, 0(%r15,%r1)
+ vllezlf %v15, 0
+ vllezlf %v31, 0
+ vllezlf %v18, 0x567(%r3,%r4)
+
+#CHECK: vlrl %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x35]
+#CHECK: vlrl %v0, 4095, 0 # encoding: [0xe6,0x00,0x0f,0xff,0x00,0x35]
+#CHECK: vlrl %v0, 0(%r15), 0 # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x35]
+#CHECK: vlrl %v0, 0, 255 # encoding: [0xe6,0xff,0x00,0x00,0x00,0x35]
+#CHECK: vlrl %v15, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x35]
+#CHECK: vlrl %v31, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf1,0x35]
+#CHECK: vlrl %v18, 1383(%r4), 3 # encoding: [0xe6,0x03,0x45,0x67,0x21,0x35]
+
+ vlrl %v0, 0, 0
+ vlrl %v0, 4095, 0
+ vlrl %v0, 0(%r15), 0
+ vlrl %v0, 0, 255
+ vlrl %v15, 0, 0
+ vlrl %v31, 0, 0
+ vlrl %v18, 1383(%r4), 3
+
+#CHECK: vlrlr %v0, %r0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x37]
+#CHECK: vlrlr %v0, %r0, 4095 # encoding: [0xe6,0x00,0x0f,0xff,0x00,0x37]
+#CHECK: vlrlr %v0, %r0, 0(%r15) # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x37]
+#CHECK: vlrlr %v0, %r15, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x37]
+#CHECK: vlrlr %v15, %r0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x37]
+#CHECK: vlrlr %v31, %r0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf1,0x37]
+#CHECK: vlrlr %v18, %r3, 1383(%r4) # encoding: [0xe6,0x03,0x45,0x67,0x21,0x37]
+
+ vlrlr %v0, %r0, 0
+ vlrlr %v0, %r0, 4095
+ vlrlr %v0, %r0, 0(%r15)
+ vlrlr %v0, %r15, 0
+ vlrlr %v15, %r0, 0
+ vlrlr %v31, %r0, 0
+ vlrlr %v18, %r3, 1383(%r4)
+
+#CHECK: vmsl %v0, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0xb8]
+#CHECK: vmsl %v0, %v0, %v0, %v0, 15, 0 # encoding: [0xe7,0x00,0x0f,0x00,0x00,0xb8]
+#CHECK: vmsl %v0, %v0, %v0, %v0, 0, 12 # encoding: [0xe7,0x00,0x00,0xc0,0x00,0xb8]
+#CHECK: vmsl %v0, %v0, %v0, %v15, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xb8]
+#CHECK: vmsl %v0, %v0, %v0, %v31, 0, 0 # encoding: [0xe7,0x00,0x00,0x00,0xf1,0xb8]
+#CHECK: vmsl %v0, %v0, %v15, %v0, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x00,0xb8]
+#CHECK: vmsl %v0, %v0, %v31, %v0, 0, 0 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0xb8]
+#CHECK: vmsl %v0, %v15, %v0, %v0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0xb8]
+#CHECK: vmsl %v0, %v31, %v0, %v0, 0, 0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0xb8]
+#CHECK: vmsl %v15, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0xb8]
+#CHECK: vmsl %v31, %v0, %v0, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0xb8]
+#CHECK: vmsl %v18, %v3, %v20, %v5, 0, 4 # encoding: [0xe7,0x23,0x40,0x40,0x5a,0xb8]
+#CHECK: vmsl %v18, %v3, %v20, %v5, 11, 8 # encoding: [0xe7,0x23,0x4b,0x80,0x5a,0xb8]
+
+ vmsl %v0, %v0, %v0, %v0, 0, 0
+ vmsl %v0, %v0, %v0, %v0, 15, 0
+ vmsl %v0, %v0, %v0, %v0, 0, 12
+ vmsl %v0, %v0, %v0, %v15, 0, 0
+ vmsl %v0, %v0, %v0, %v31, 0, 0
+ vmsl %v0, %v0, %v15, %v0, 0, 0
+ vmsl %v0, %v0, %v31, %v0, 0, 0
+ vmsl %v0, %v15, %v0, %v0, 0, 0
+ vmsl %v0, %v31, %v0, %v0, 0, 0
+ vmsl %v15, %v0, %v0, %v0, 0, 0
+ vmsl %v31, %v0, %v0, %v0, 0, 0
+ vmsl %v18, %v3, %v20, %v5, 0, 4
+ vmsl %v18, %v3, %v20, %v5, 11, 8
+
+#CHECK: vmslg %v0, %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x03,0x00,0x00,0xb8]
+#CHECK: vmslg %v0, %v0, %v0, %v0, 12 # encoding: [0xe7,0x00,0x03,0xc0,0x00,0xb8]
+#CHECK: vmslg %v0, %v0, %v0, %v15, 0 # encoding: [0xe7,0x00,0x03,0x00,0xf0,0xb8]
+#CHECK: vmslg %v0, %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0x03,0x00,0xf1,0xb8]
+#CHECK: vmslg %v0, %v0, %v15, %v0, 0 # encoding: [0xe7,0x00,0xf3,0x00,0x00,0xb8]
+#CHECK: vmslg %v0, %v0, %v31, %v0, 0 # encoding: [0xe7,0x00,0xf3,0x00,0x02,0xb8]
+#CHECK: vmslg %v0, %v15, %v0, %v0, 0 # encoding: [0xe7,0x0f,0x03,0x00,0x00,0xb8]
+#CHECK: vmslg %v0, %v31, %v0, %v0, 0 # encoding: [0xe7,0x0f,0x03,0x00,0x04,0xb8]
+#CHECK: vmslg %v15, %v0, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x03,0x00,0x00,0xb8]
+#CHECK: vmslg %v31, %v0, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x03,0x00,0x08,0xb8]
+#CHECK: vmslg %v18, %v3, %v20, %v5, 4 # encoding: [0xe7,0x23,0x43,0x40,0x5a,0xb8]
+#CHECK: vmslg %v18, %v3, %v20, %v5, 8 # encoding: [0xe7,0x23,0x43,0x80,0x5a,0xb8]
+
+ vmslg %v0, %v0, %v0, %v0, 0
+ vmslg %v0, %v0, %v0, %v0, 12
+ vmslg %v0, %v0, %v0, %v15, 0
+ vmslg %v0, %v0, %v0, %v31, 0
+ vmslg %v0, %v0, %v15, %v0, 0
+ vmslg %v0, %v0, %v31, %v0, 0
+ vmslg %v0, %v15, %v0, %v0, 0
+ vmslg %v0, %v31, %v0, %v0, 0
+ vmslg %v15, %v0, %v0, %v0, 0
+ vmslg %v31, %v0, %v0, %v0, 0
+ vmslg %v18, %v3, %v20, %v5, 4
+ vmslg %v18, %v3, %v20, %v5, 8
+
+#CHECK: vmp %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x78]
+#CHECK: vmp %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x78]
+#CHECK: vmp %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x78]
+#CHECK: vmp %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x78]
+#CHECK: vmp %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x78]
+#CHECK: vmp %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x78]
+#CHECK: vmp %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x78]
+
+ vmp %v0, %v0, %v0, 0, 0
+ vmp %v0, %v0, %v0, 0, 15
+ vmp %v0, %v0, %v0, 255, 0
+ vmp %v0, %v0, %v31, 0, 0
+ vmp %v0, %v31, %v0, 0, 0
+ vmp %v31, %v0, %v0, 0, 0
+ vmp %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vmsp %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x79]
+#CHECK: vmsp %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x79]
+#CHECK: vmsp %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x79]
+#CHECK: vmsp %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x79]
+#CHECK: vmsp %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x79]
+#CHECK: vmsp %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x79]
+#CHECK: vmsp %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x79]
+
+ vmsp %v0, %v0, %v0, 0, 0
+ vmsp %v0, %v0, %v0, 0, 15
+ vmsp %v0, %v0, %v0, 255, 0
+ vmsp %v0, %v0, %v31, 0, 0
+ vmsp %v0, %v31, %v0, 0, 0
+ vmsp %v31, %v0, %v0, 0, 0
+ vmsp %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vnn %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x6e]
+#CHECK: vnn %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x6e]
+#CHECK: vnn %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x6e]
+#CHECK: vnn %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x6e]
+#CHECK: vnn %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x6e]
+
+ vnn %v0, %v0, %v0
+ vnn %v0, %v0, %v31
+ vnn %v0, %v31, %v0
+ vnn %v31, %v0, %v0
+ vnn %v18, %v3, %v20
+
+#CHECK: vnx %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x6c]
+#CHECK: vnx %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x6c]
+#CHECK: vnx %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x6c]
+#CHECK: vnx %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x6c]
+#CHECK: vnx %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x6c]
+
+ vnx %v0, %v0, %v0
+ vnx %v0, %v0, %v31
+ vnx %v0, %v31, %v0
+ vnx %v31, %v0, %v0
+ vnx %v18, %v3, %v20
+
+#CHECK: voc %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x6f]
+#CHECK: voc %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x00,0x02,0x6f]
+#CHECK: voc %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x6f]
+#CHECK: voc %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x6f]
+#CHECK: voc %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x00,0x0a,0x6f]
+
+ voc %v0, %v0, %v0
+ voc %v0, %v0, %v31
+ voc %v0, %v31, %v0
+ voc %v31, %v0, %v0
+ voc %v18, %v3, %v20
+
+#CHECK: vpkz %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x34]
+#CHECK: vpkz %v0, 4095, 0 # encoding: [0xe6,0x00,0x0f,0xff,0x00,0x34]
+#CHECK: vpkz %v0, 0(%r15), 0 # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x34]
+#CHECK: vpkz %v0, 0, 255 # encoding: [0xe6,0xff,0x00,0x00,0x00,0x34]
+#CHECK: vpkz %v15, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x34]
+#CHECK: vpkz %v31, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf1,0x34]
+#CHECK: vpkz %v18, 1383(%r4), 3 # encoding: [0xe6,0x03,0x45,0x67,0x21,0x34]
+
+ vpkz %v0, 0, 0
+ vpkz %v0, 4095, 0
+ vpkz %v0, 0(%r15), 0
+ vpkz %v0, 0, 255
+ vpkz %v15, 0, 0
+ vpkz %v31, 0, 0
+ vpkz %v18, 1383(%r4), 3
+
+#CHECK: vpopctb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x00,0x50]
+#CHECK: vpopctb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x00,0x50]
+#CHECK: vpopctb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x50]
+#CHECK: vpopctb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x50]
+#CHECK: vpopctb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x50]
+#CHECK: vpopctb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x04,0x50]
+
+ vpopctb %v0, %v0
+ vpopctb %v0, %v15
+ vpopctb %v0, %v31
+ vpopctb %v15, %v0
+ vpopctb %v31, %v0
+ vpopctb %v14, %v17
+
+#CHECK: vpopctf %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0x50]
+#CHECK: vpopctf %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0x50]
+#CHECK: vpopctf %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0x50]
+#CHECK: vpopctf %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0x50]
+#CHECK: vpopctf %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0x50]
+#CHECK: vpopctf %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0x50]
+
+ vpopctf %v0, %v0
+ vpopctf %v0, %v15
+ vpopctf %v0, %v31
+ vpopctf %v15, %v0
+ vpopctf %v31, %v0
+ vpopctf %v14, %v17
+
+#CHECK: vpopctg %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x30,0x50]
+#CHECK: vpopctg %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x30,0x50]
+#CHECK: vpopctg %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x34,0x50]
+#CHECK: vpopctg %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x30,0x50]
+#CHECK: vpopctg %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x38,0x50]
+#CHECK: vpopctg %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x34,0x50]
+
+ vpopctg %v0, %v0
+ vpopctg %v0, %v15
+ vpopctg %v0, %v31
+ vpopctg %v15, %v0
+ vpopctg %v31, %v0
+ vpopctg %v14, %v17
+
+#CHECK: vpopcth %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x10,0x50]
+#CHECK: vpopcth %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x10,0x50]
+#CHECK: vpopcth %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x14,0x50]
+#CHECK: vpopcth %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x10,0x50]
+#CHECK: vpopcth %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x18,0x50]
+#CHECK: vpopcth %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x14,0x50]
+
+ vpopcth %v0, %v0
+ vpopcth %v0, %v15
+ vpopcth %v0, %v31
+ vpopcth %v15, %v0
+ vpopcth %v31, %v0
+ vpopcth %v14, %v17
+
+#CHECK: vpsop %v0, %v0, 0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x5b]
+#CHECK: vpsop %v0, %v0, 0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x5b]
+#CHECK: vpsop %v0, %v0, 0, 255, 0 # encoding: [0xe6,0x00,0xff,0x00,0x00,0x5b]
+#CHECK: vpsop %v0, %v0, 255, 0, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x5b]
+#CHECK: vpsop %v0, %v31, 0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x5b]
+#CHECK: vpsop %v31, %v0, 0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x5b]
+#CHECK: vpsop %v13, %v17, 52, 121, 11 # encoding: [0xe6,0xd1,0x79,0xb3,0x44,0x5b]
+
+ vpsop %v0, %v0, 0, 0, 0
+ vpsop %v0, %v0, 0, 0, 15
+ vpsop %v0, %v0, 0, 255, 0
+ vpsop %v0, %v0, 255, 0, 0
+ vpsop %v0, %v31, 0, 0, 0
+ vpsop %v31, %v0, 0, 0, 0
+ vpsop %v13, %v17, 0x34, 0x79, 11
+
+#CHECK: vrp %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x7b]
+#CHECK: vrp %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x7b]
+#CHECK: vrp %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x7b]
+#CHECK: vrp %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x7b]
+#CHECK: vrp %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x7b]
+#CHECK: vrp %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x7b]
+#CHECK: vrp %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x7b]
+
+ vrp %v0, %v0, %v0, 0, 0
+ vrp %v0, %v0, %v0, 0, 15
+ vrp %v0, %v0, %v0, 255, 0
+ vrp %v0, %v0, %v31, 0, 0
+ vrp %v0, %v31, %v0, 0, 0
+ vrp %v31, %v0, %v0, 0, 0
+ vrp %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vsdp %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x7e]
+#CHECK: vsdp %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x7e]
+#CHECK: vsdp %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x7e]
+#CHECK: vsdp %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x7e]
+#CHECK: vsdp %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x7e]
+#CHECK: vsdp %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x7e]
+#CHECK: vsdp %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x7e]
+
+ vsdp %v0, %v0, %v0, 0, 0
+ vsdp %v0, %v0, %v0, 0, 15
+ vsdp %v0, %v0, %v0, 255, 0
+ vsdp %v0, %v0, %v31, 0, 0
+ vsdp %v0, %v31, %v0, 0, 0
+ vsdp %v31, %v0, %v0, 0, 0
+ vsdp %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vsp %v0, %v0, %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x73]
+#CHECK: vsp %v0, %v0, %v0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x73]
+#CHECK: vsp %v0, %v0, %v0, 255, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x73]
+#CHECK: vsp %v0, %v0, %v31, 0, 0 # encoding: [0xe6,0x00,0xf0,0x00,0x02,0x73]
+#CHECK: vsp %v0, %v31, %v0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x73]
+#CHECK: vsp %v31, %v0, %v0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x73]
+#CHECK: vsp %v13, %v17, %v21, 121, 11 # encoding: [0xe6,0xd1,0x50,0xb7,0x96,0x73]
+
+ vsp %v0, %v0, %v0, 0, 0
+ vsp %v0, %v0, %v0, 0, 15
+ vsp %v0, %v0, %v0, 255, 0
+ vsp %v0, %v0, %v31, 0, 0
+ vsp %v0, %v31, %v0, 0, 0
+ vsp %v31, %v0, %v0, 0, 0
+ vsp %v13, %v17, %v21, 0x79, 11
+
+#CHECK: vsrp %v0, %v0, 0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x59]
+#CHECK: vsrp %v0, %v0, 0, 0, 15 # encoding: [0xe6,0x00,0x00,0xf0,0x00,0x59]
+#CHECK: vsrp %v0, %v0, 0, 255, 0 # encoding: [0xe6,0x00,0xff,0x00,0x00,0x59]
+#CHECK: vsrp %v0, %v0, 255, 0, 0 # encoding: [0xe6,0x00,0x00,0x0f,0xf0,0x59]
+#CHECK: vsrp %v0, %v31, 0, 0, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x59]
+#CHECK: vsrp %v31, %v0, 0, 0, 0 # encoding: [0xe6,0xf0,0x00,0x00,0x08,0x59]
+#CHECK: vsrp %v13, %v17, 52, 121, 11 # encoding: [0xe6,0xd1,0x79,0xb3,0x44,0x59]
+
+ vsrp %v0, %v0, 0, 0, 0
+ vsrp %v0, %v0, 0, 0, 15
+ vsrp %v0, %v0, 0, 255, 0
+ vsrp %v0, %v0, 255, 0, 0
+ vsrp %v0, %v31, 0, 0, 0
+ vsrp %v31, %v0, 0, 0, 0
+ vsrp %v13, %v17, 0x34, 0x79, 11
+
+#CHECK: vstrl %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x3d]
+#CHECK: vstrl %v0, 4095, 0 # encoding: [0xe6,0x00,0x0f,0xff,0x00,0x3d]
+#CHECK: vstrl %v0, 0(%r15), 0 # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x3d]
+#CHECK: vstrl %v0, 0, 255 # encoding: [0xe6,0xff,0x00,0x00,0x00,0x3d]
+#CHECK: vstrl %v15, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x3d]
+#CHECK: vstrl %v31, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf1,0x3d]
+#CHECK: vstrl %v18, 1383(%r4), 3 # encoding: [0xe6,0x03,0x45,0x67,0x21,0x3d]
+
+ vstrl %v0, 0, 0
+ vstrl %v0, 4095, 0
+ vstrl %v0, 0(%r15), 0
+ vstrl %v0, 0, 255
+ vstrl %v15, 0, 0
+ vstrl %v31, 0, 0
+ vstrl %v18, 1383(%r4), 3
+
+#CHECK: vstrlr %v0, %r0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x3f]
+#CHECK: vstrlr %v0, %r0, 4095 # encoding: [0xe6,0x00,0x0f,0xff,0x00,0x3f]
+#CHECK: vstrlr %v0, %r0, 0(%r15) # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x3f]
+#CHECK: vstrlr %v0, %r15, 0 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x3f]
+#CHECK: vstrlr %v15, %r0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x3f]
+#CHECK: vstrlr %v31, %r0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf1,0x3f]
+#CHECK: vstrlr %v18, %r3, 1383(%r4) # encoding: [0xe6,0x03,0x45,0x67,0x21,0x3f]
+
+ vstrlr %v0, %r0, 0
+ vstrlr %v0, %r0, 4095
+ vstrlr %v0, %r0, 0(%r15)
+ vstrlr %v0, %r15, 0
+ vstrlr %v15, %r0, 0
+ vstrlr %v31, %r0, 0
+ vstrlr %v18, %r3, 1383(%r4)
+
+#CHECK: vtp %v0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x5f]
+#CHECK: vtp %v15 # encoding: [0xe6,0x0f,0x00,0x00,0x00,0x5f]
+#CHECK: vtp %v31 # encoding: [0xe6,0x0f,0x00,0x00,0x04,0x5f]
+
+ vtp %v0
+ vtp %v15
+ vtp %v31
+
+#CHECK: vupkz %v0, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0x00,0x3c]
+#CHECK: vupkz %v0, 4095, 0 # encoding: [0xe6,0x00,0x0f,0xff,0x00,0x3c]
+#CHECK: vupkz %v0, 0(%r15), 0 # encoding: [0xe6,0x00,0xf0,0x00,0x00,0x3c]
+#CHECK: vupkz %v0, 0, 255 # encoding: [0xe6,0xff,0x00,0x00,0x00,0x3c]
+#CHECK: vupkz %v15, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf0,0x3c]
+#CHECK: vupkz %v31, 0, 0 # encoding: [0xe6,0x00,0x00,0x00,0xf1,0x3c]
+#CHECK: vupkz %v18, 1383(%r4), 3 # encoding: [0xe6,0x03,0x45,0x67,0x21,0x3c]
+
+ vupkz %v0, 0, 0
+ vupkz %v0, 4095, 0
+ vupkz %v0, 0(%r15), 0
+ vupkz %v0, 0, 255
+ vupkz %v15, 0, 0
+ vupkz %v31, 0, 0
+ vupkz %v18, 1383(%r4), 3
+
+#CHECK: wfasb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe3]
+#CHECK: wfasb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe3]
+#CHECK: wfasb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xe3]
+#CHECK: wfasb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xe3]
+#CHECK: wfasb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xe3]
+#CHECK: wfasb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xe3]
+
+ wfasb %v0, %v0, %v0
+ wfasb %f0, %f0, %f0
+ wfasb %v0, %v0, %v31
+ wfasb %v0, %v31, %v0
+ wfasb %v31, %v0, %v0
+ wfasb %v18, %v3, %v20
+
+#CHECK: wfaxb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xe3]
+#CHECK: wfaxb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xe3]
+#CHECK: wfaxb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xe3]
+#CHECK: wfaxb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xe3]
+#CHECK: wfaxb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xe3]
+
+ wfaxb %v0, %v0, %v0
+ wfaxb %v0, %v0, %v31
+ wfaxb %v0, %v31, %v0
+ wfaxb %v31, %v0, %v0
+ wfaxb %v18, %v3, %v20
+
+#CHECK: wfcsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xcb]
+#CHECK: wfcsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xcb]
+#CHECK: wfcsb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xcb]
+#CHECK: wfcsb %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xcb]
+#CHECK: wfcsb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xcb]
+#CHECK: wfcsb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xcb]
+#CHECK: wfcsb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xcb]
+
+ wfcsb %v0, %v0
+ wfcsb %f0, %f0
+ wfcsb %v0, %v15
+ wfcsb %v0, %v31
+ wfcsb %v15, %v0
+ wfcsb %v31, %v0
+ wfcsb %v14, %v17
+
+#CHECK: wfcxb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x40,0xcb]
+#CHECK: wfcxb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x40,0xcb]
+#CHECK: wfcxb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x44,0xcb]
+#CHECK: wfcxb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x40,0xcb]
+#CHECK: wfcxb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x48,0xcb]
+#CHECK: wfcxb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x44,0xcb]
+
+ wfcxb %v0, %v0
+ wfcxb %v0, %v15
+ wfcxb %v0, %v31
+ wfcxb %v15, %v0
+ wfcxb %v31, %v0
+ wfcxb %v14, %v17
+
+#CHECK: wfcesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe8]
+#CHECK: wfcesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe8]
+#CHECK: wfcesb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xe8]
+#CHECK: wfcesb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xe8]
+#CHECK: wfcesb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xe8]
+#CHECK: wfcesb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xe8]
+
+ wfcesb %v0, %v0, %v0
+ wfcesb %f0, %f0, %f0
+ wfcesb %v0, %v0, %v31
+ wfcesb %v0, %v31, %v0
+ wfcesb %v31, %v0, %v0
+ wfcesb %v18, %v3, %v20
+
+#CHECK: wfcesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xe8]
+#CHECK: wfcesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xe8]
+#CHECK: wfcesbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x18,0x22,0xe8]
+#CHECK: wfcesbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x18,0x24,0xe8]
+#CHECK: wfcesbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x18,0x28,0xe8]
+#CHECK: wfcesbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x2a,0xe8]
+
+ wfcesbs %v0, %v0, %v0
+ wfcesbs %f0, %f0, %f0
+ wfcesbs %v0, %v0, %v31
+ wfcesbs %v0, %v31, %v0
+ wfcesbs %v31, %v0, %v0
+ wfcesbs %v18, %v3, %v20
+
+#CHECK: wfcexb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xe8]
+#CHECK: wfcexb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xe8]
+#CHECK: wfcexb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xe8]
+#CHECK: wfcexb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xe8]
+#CHECK: wfcexb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xe8]
+
+ wfcexb %v0, %v0, %v0
+ wfcexb %v0, %v0, %v31
+ wfcexb %v0, %v31, %v0
+ wfcexb %v31, %v0, %v0
+ wfcexb %v18, %v3, %v20
+
+#CHECK: wfcexbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x18,0x40,0xe8]
+#CHECK: wfcexbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x18,0x42,0xe8]
+#CHECK: wfcexbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x18,0x44,0xe8]
+#CHECK: wfcexbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x18,0x48,0xe8]
+#CHECK: wfcexbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x4a,0xe8]
+
+ wfcexbs %v0, %v0, %v0
+ wfcexbs %v0, %v0, %v31
+ wfcexbs %v0, %v31, %v0
+ wfcexbs %v31, %v0, %v0
+ wfcexbs %v18, %v3, %v20
+
+#CHECK: wfchsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xeb]
+#CHECK: wfchsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xeb]
+#CHECK: wfchsb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xeb]
+#CHECK: wfchsb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xeb]
+#CHECK: wfchsb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xeb]
+#CHECK: wfchsb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xeb]
+
+ wfchsb %v0, %v0, %v0
+ wfchsb %f0, %f0, %f0
+ wfchsb %v0, %v0, %v31
+ wfchsb %v0, %v31, %v0
+ wfchsb %v31, %v0, %v0
+ wfchsb %v18, %v3, %v20
+
+#CHECK: wfchsbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xeb]
+#CHECK: wfchsbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xeb]
+#CHECK: wfchsbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x18,0x22,0xeb]
+#CHECK: wfchsbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x18,0x24,0xeb]
+#CHECK: wfchsbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x18,0x28,0xeb]
+#CHECK: wfchsbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x2a,0xeb]
+
+ wfchsbs %v0, %v0, %v0
+ wfchsbs %f0, %f0, %f0
+ wfchsbs %v0, %v0, %v31
+ wfchsbs %v0, %v31, %v0
+ wfchsbs %v31, %v0, %v0
+ wfchsbs %v18, %v3, %v20
+
+#CHECK: wfchxb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xeb]
+#CHECK: wfchxb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xeb]
+#CHECK: wfchxb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xeb]
+#CHECK: wfchxb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xeb]
+#CHECK: wfchxb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xeb]
+
+ wfchxb %v0, %v0, %v0
+ wfchxb %v0, %v0, %v31
+ wfchxb %v0, %v31, %v0
+ wfchxb %v31, %v0, %v0
+ wfchxb %v18, %v3, %v20
+
+#CHECK: wfchxbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x18,0x40,0xeb]
+#CHECK: wfchxbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x18,0x42,0xeb]
+#CHECK: wfchxbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x18,0x44,0xeb]
+#CHECK: wfchxbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x18,0x48,0xeb]
+#CHECK: wfchxbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x4a,0xeb]
+
+ wfchxbs %v0, %v0, %v0
+ wfchxbs %v0, %v0, %v31
+ wfchxbs %v0, %v31, %v0
+ wfchxbs %v31, %v0, %v0
+ wfchxbs %v18, %v3, %v20
+
+#CHECK: wfchesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xea]
+#CHECK: wfchesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xea]
+#CHECK: wfchesb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xea]
+#CHECK: wfchesb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xea]
+#CHECK: wfchesb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xea]
+#CHECK: wfchesb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xea]
+
+ wfchesb %v0, %v0, %v0
+ wfchesb %f0, %f0, %f0
+ wfchesb %v0, %v0, %v31
+ wfchesb %v0, %v31, %v0
+ wfchesb %v31, %v0, %v0
+ wfchesb %v18, %v3, %v20
+
+#CHECK: wfchesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xea]
+#CHECK: wfchesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xea]
+#CHECK: wfchesbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x18,0x22,0xea]
+#CHECK: wfchesbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x18,0x24,0xea]
+#CHECK: wfchesbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x18,0x28,0xea]
+#CHECK: wfchesbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x2a,0xea]
+
+ wfchesbs %v0, %v0, %v0
+ wfchesbs %f0, %f0, %f0
+ wfchesbs %v0, %v0, %v31
+ wfchesbs %v0, %v31, %v0
+ wfchesbs %v31, %v0, %v0
+ wfchesbs %v18, %v3, %v20
+
+#CHECK: wfchexb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xea]
+#CHECK: wfchexb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xea]
+#CHECK: wfchexb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xea]
+#CHECK: wfchexb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xea]
+#CHECK: wfchexb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xea]
+
+ wfchexb %v0, %v0, %v0
+ wfchexb %v0, %v0, %v31
+ wfchexb %v0, %v31, %v0
+ wfchexb %v31, %v0, %v0
+ wfchexb %v18, %v3, %v20
+
+#CHECK: wfchexbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x18,0x40,0xea]
+#CHECK: wfchexbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x18,0x42,0xea]
+#CHECK: wfchexbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x18,0x44,0xea]
+#CHECK: wfchexbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x18,0x48,0xea]
+#CHECK: wfchexbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x18,0x4a,0xea]
+
+ wfchexbs %v0, %v0, %v0
+ wfchexbs %v0, %v0, %v31
+ wfchexbs %v0, %v31, %v0
+ wfchexbs %v31, %v0, %v0
+ wfchexbs %v18, %v3, %v20
+
+#CHECK: wfdsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe5]
+#CHECK: wfdsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe5]
+#CHECK: wfdsb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xe5]
+#CHECK: wfdsb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xe5]
+#CHECK: wfdsb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xe5]
+#CHECK: wfdsb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xe5]
+
+ wfdsb %v0, %v0, %v0
+ wfdsb %f0, %f0, %f0
+ wfdsb %v0, %v0, %v31
+ wfdsb %v0, %v31, %v0
+ wfdsb %v31, %v0, %v0
+ wfdsb %v18, %v3, %v20
+
+#CHECK: wfdxb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xe5]
+#CHECK: wfdxb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xe5]
+#CHECK: wfdxb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xe5]
+#CHECK: wfdxb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xe5]
+#CHECK: wfdxb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xe5]
+
+ wfdxb %v0, %v0, %v0
+ wfdxb %v0, %v0, %v31
+ wfdxb %v0, %v31, %v0
+ wfdxb %v31, %v0, %v0
+ wfdxb %v18, %v3, %v20
+
+#CHECK: wfisb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xc7]
+#CHECK: wfisb %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xc7]
+#CHECK: wfisb %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x20,0xc7]
+#CHECK: wfisb %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xc7]
+#CHECK: wfisb %f0, %f0, 7, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x20,0xc7]
+#CHECK: wfisb %f0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xc7]
+#CHECK: wfisb %v31, %f0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xc7]
+#CHECK: wfisb %f14, %v17, 4, 10 # encoding: [0xe7,0xe1,0x00,0xac,0x24,0xc7]
+
+ wfisb %v0, %v0, 0, 0
+ wfisb %f0, %f0, 0, 0
+ wfisb %v0, %v0, 0, 15
+ wfisb %v0, %v0, 4, 0
+ wfisb %v0, %v0, 7, 0
+ wfisb %v0, %v31, 0, 0
+ wfisb %v31, %v0, 0, 0
+ wfisb %v14, %v17, 4, 10
+
+#CHECK: wfixb %v0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xc7]
+#CHECK: wfixb %v0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x40,0xc7]
+#CHECK: wfixb %v0, %v0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x40,0xc7]
+#CHECK: wfixb %v0, %v0, 7, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x40,0xc7]
+#CHECK: wfixb %v0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xc7]
+#CHECK: wfixb %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xc7]
+#CHECK: wfixb %v14, %v17, 4, 10 # encoding: [0xe7,0xe1,0x00,0xac,0x44,0xc7]
+
+ wfixb %v0, %v0, 0, 0
+ wfixb %v0, %v0, 0, 15
+ wfixb %v0, %v0, 4, 0
+ wfixb %v0, %v0, 7, 0
+ wfixb %v0, %v31, 0, 0
+ wfixb %v31, %v0, 0, 0
+ wfixb %v14, %v17, 4, 10
+
+#CHECK: wfksb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xca]
+#CHECK: wfksb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x00,0x20,0xca]
+#CHECK: wfksb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x00,0x20,0xca]
+#CHECK: wfksb %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x24,0xca]
+#CHECK: wfksb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x00,0x20,0xca]
+#CHECK: wfksb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x00,0x28,0xca]
+#CHECK: wfksb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x24,0xca]
+
+ wfksb %v0, %v0
+ wfksb %f0, %f0
+ wfksb %v0, %v15
+ wfksb %v0, %v31
+ wfksb %v15, %v0
+ wfksb %v31, %v0
+ wfksb %v14, %v17
+
+#CHECK: wfkxb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x00,0x40,0xca]
+#CHECK: wfkxb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x00,0x40,0xca]
+#CHECK: wfkxb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x00,0x44,0xca]
+#CHECK: wfkxb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x40,0xca]
+#CHECK: wfkxb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x00,0x48,0xca]
+#CHECK: wfkxb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x00,0x44,0xca]
+
+ wfkxb %v0, %v0
+ wfkxb %v0, %v15
+ wfkxb %v0, %v31
+ wfkxb %v15, %v0
+ wfkxb %v31, %v0
+ wfkxb %v14, %v17
+
+#CHECK: wfkedb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xe8]
+#CHECK: wfkedb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xe8]
+#CHECK: wfkedb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x32,0xe8]
+#CHECK: wfkedb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x0c,0x34,0xe8]
+#CHECK: wfkedb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x0c,0x38,0xe8]
+#CHECK: wfkedb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x3a,0xe8]
+
+ wfkedb %v0, %v0, %v0
+ wfkedb %f0, %f0, %f0
+ wfkedb %v0, %v0, %v31
+ wfkedb %v0, %v31, %v0
+ wfkedb %v31, %v0, %v0
+ wfkedb %v18, %v3, %v20
+
+#CHECK: wfkedbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x30,0xe8]
+#CHECK: wfkedbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x30,0xe8]
+#CHECK: wfkedbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x32,0xe8]
+#CHECK: wfkedbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x1c,0x34,0xe8]
+#CHECK: wfkedbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x1c,0x38,0xe8]
+#CHECK: wfkedbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x3a,0xe8]
+
+ wfkedbs %v0, %v0, %v0
+ wfkedbs %f0, %f0, %f0
+ wfkedbs %v0, %v0, %v31
+ wfkedbs %v0, %v31, %v0
+ wfkedbs %v31, %v0, %v0
+ wfkedbs %v18, %v3, %v20
+
+#CHECK: wfkesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xe8]
+#CHECK: wfkesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xe8]
+#CHECK: wfkesb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x22,0xe8]
+#CHECK: wfkesb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x0c,0x24,0xe8]
+#CHECK: wfkesb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x0c,0x28,0xe8]
+#CHECK: wfkesb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x2a,0xe8]
+
+ wfkesb %v0, %v0, %v0
+ wfkesb %f0, %f0, %f0
+ wfkesb %v0, %v0, %v31
+ wfkesb %v0, %v31, %v0
+ wfkesb %v31, %v0, %v0
+ wfkesb %v18, %v3, %v20
+
+#CHECK: wfkesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x20,0xe8]
+#CHECK: wfkesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x20,0xe8]
+#CHECK: wfkesbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x22,0xe8]
+#CHECK: wfkesbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x1c,0x24,0xe8]
+#CHECK: wfkesbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x1c,0x28,0xe8]
+#CHECK: wfkesbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x2a,0xe8]
+
+ wfkesbs %v0, %v0, %v0
+ wfkesbs %f0, %f0, %f0
+ wfkesbs %v0, %v0, %v31
+ wfkesbs %v0, %v31, %v0
+ wfkesbs %v31, %v0, %v0
+ wfkesbs %v18, %v3, %v20
+
+#CHECK: wfkexb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x0c,0x40,0xe8]
+#CHECK: wfkexb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x42,0xe8]
+#CHECK: wfkexb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x0c,0x44,0xe8]
+#CHECK: wfkexb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x0c,0x48,0xe8]
+#CHECK: wfkexb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x4a,0xe8]
+
+ wfkexb %v0, %v0, %v0
+ wfkexb %v0, %v0, %v31
+ wfkexb %v0, %v31, %v0
+ wfkexb %v31, %v0, %v0
+ wfkexb %v18, %v3, %v20
+
+#CHECK: wfkexbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x1c,0x40,0xe8]
+#CHECK: wfkexbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x42,0xe8]
+#CHECK: wfkexbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x1c,0x44,0xe8]
+#CHECK: wfkexbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x1c,0x48,0xe8]
+#CHECK: wfkexbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x4a,0xe8]
+
+ wfkexbs %v0, %v0, %v0
+ wfkexbs %v0, %v0, %v31
+ wfkexbs %v0, %v31, %v0
+ wfkexbs %v31, %v0, %v0
+ wfkexbs %v18, %v3, %v20
+
+#CHECK: wfkhdb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xeb]
+#CHECK: wfkhdb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xeb]
+#CHECK: wfkhdb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x32,0xeb]
+#CHECK: wfkhdb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x0c,0x34,0xeb]
+#CHECK: wfkhdb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x0c,0x38,0xeb]
+#CHECK: wfkhdb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x3a,0xeb]
+
+ wfkhdb %v0, %v0, %v0
+ wfkhdb %f0, %f0, %f0
+ wfkhdb %v0, %v0, %v31
+ wfkhdb %v0, %v31, %v0
+ wfkhdb %v31, %v0, %v0
+ wfkhdb %v18, %v3, %v20
+
+#CHECK: wfkhdbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x30,0xeb]
+#CHECK: wfkhdbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x30,0xeb]
+#CHECK: wfkhdbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x32,0xeb]
+#CHECK: wfkhdbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x1c,0x34,0xeb]
+#CHECK: wfkhdbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x1c,0x38,0xeb]
+#CHECK: wfkhdbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x3a,0xeb]
+
+ wfkhdbs %v0, %v0, %v0
+ wfkhdbs %f0, %f0, %f0
+ wfkhdbs %v0, %v0, %v31
+ wfkhdbs %v0, %v31, %v0
+ wfkhdbs %v31, %v0, %v0
+ wfkhdbs %v18, %v3, %v20
+
+#CHECK: wfkhsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xeb]
+#CHECK: wfkhsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xeb]
+#CHECK: wfkhsb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x22,0xeb]
+#CHECK: wfkhsb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x0c,0x24,0xeb]
+#CHECK: wfkhsb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x0c,0x28,0xeb]
+#CHECK: wfkhsb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x2a,0xeb]
+
+ wfkhsb %v0, %v0, %v0
+ wfkhsb %f0, %f0, %f0
+ wfkhsb %v0, %v0, %v31
+ wfkhsb %v0, %v31, %v0
+ wfkhsb %v31, %v0, %v0
+ wfkhsb %v18, %v3, %v20
+
+#CHECK: wfkhsbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x20,0xeb]
+#CHECK: wfkhsbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x20,0xeb]
+#CHECK: wfkhsbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x22,0xeb]
+#CHECK: wfkhsbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x1c,0x24,0xeb]
+#CHECK: wfkhsbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x1c,0x28,0xeb]
+#CHECK: wfkhsbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x2a,0xeb]
+
+ wfkhsbs %v0, %v0, %v0
+ wfkhsbs %f0, %f0, %f0
+ wfkhsbs %v0, %v0, %v31
+ wfkhsbs %v0, %v31, %v0
+ wfkhsbs %v31, %v0, %v0
+ wfkhsbs %v18, %v3, %v20
+
+#CHECK: wfkhxb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x0c,0x40,0xeb]
+#CHECK: wfkhxb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x42,0xeb]
+#CHECK: wfkhxb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x0c,0x44,0xeb]
+#CHECK: wfkhxb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x0c,0x48,0xeb]
+#CHECK: wfkhxb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x4a,0xeb]
+
+ wfkhxb %v0, %v0, %v0
+ wfkhxb %v0, %v0, %v31
+ wfkhxb %v0, %v31, %v0
+ wfkhxb %v31, %v0, %v0
+ wfkhxb %v18, %v3, %v20
+
+#CHECK: wfkhxbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x1c,0x40,0xeb]
+#CHECK: wfkhxbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x42,0xeb]
+#CHECK: wfkhxbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x1c,0x44,0xeb]
+#CHECK: wfkhxbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x1c,0x48,0xeb]
+#CHECK: wfkhxbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x4a,0xeb]
+
+ wfkhxbs %v0, %v0, %v0
+ wfkhxbs %v0, %v0, %v31
+ wfkhxbs %v0, %v31, %v0
+ wfkhxbs %v31, %v0, %v0
+ wfkhxbs %v18, %v3, %v20
+
+#CHECK: wfkhedb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xea]
+#CHECK: wfkhedb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xea]
+#CHECK: wfkhedb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x32,0xea]
+#CHECK: wfkhedb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x0c,0x34,0xea]
+#CHECK: wfkhedb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x0c,0x38,0xea]
+#CHECK: wfkhedb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x3a,0xea]
+
+ wfkhedb %v0, %v0, %v0
+ wfkhedb %f0, %f0, %f0
+ wfkhedb %v0, %v0, %v31
+ wfkhedb %v0, %v31, %v0
+ wfkhedb %v31, %v0, %v0
+ wfkhedb %v18, %v3, %v20
+
+#CHECK: wfkhedbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x30,0xea]
+#CHECK: wfkhedbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x30,0xea]
+#CHECK: wfkhedbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x32,0xea]
+#CHECK: wfkhedbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x1c,0x34,0xea]
+#CHECK: wfkhedbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x1c,0x38,0xea]
+#CHECK: wfkhedbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x3a,0xea]
+
+ wfkhedbs %v0, %v0, %v0
+ wfkhedbs %f0, %f0, %f0
+ wfkhedbs %v0, %v0, %v31
+ wfkhedbs %v0, %v31, %v0
+ wfkhedbs %v31, %v0, %v0
+ wfkhedbs %v18, %v3, %v20
+
+#CHECK: wfkhesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xea]
+#CHECK: wfkhesb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x0c,0x20,0xea]
+#CHECK: wfkhesb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x22,0xea]
+#CHECK: wfkhesb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x0c,0x24,0xea]
+#CHECK: wfkhesb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x0c,0x28,0xea]
+#CHECK: wfkhesb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x2a,0xea]
+
+ wfkhesb %v0, %v0, %v0
+ wfkhesb %f0, %f0, %f0
+ wfkhesb %v0, %v0, %v31
+ wfkhesb %v0, %v31, %v0
+ wfkhesb %v31, %v0, %v0
+ wfkhesb %v18, %v3, %v20
+
+#CHECK: wfkhesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x20,0xea]
+#CHECK: wfkhesbs %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x1c,0x20,0xea]
+#CHECK: wfkhesbs %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x22,0xea]
+#CHECK: wfkhesbs %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x1c,0x24,0xea]
+#CHECK: wfkhesbs %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x1c,0x28,0xea]
+#CHECK: wfkhesbs %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x2a,0xea]
+
+ wfkhesbs %v0, %v0, %v0
+ wfkhesbs %f0, %f0, %f0
+ wfkhesbs %v0, %v0, %v31
+ wfkhesbs %v0, %v31, %v0
+ wfkhesbs %v31, %v0, %v0
+ wfkhesbs %v18, %v3, %v20
+
+#CHECK: wfkhexb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x0c,0x40,0xea]
+#CHECK: wfkhexb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x0c,0x42,0xea]
+#CHECK: wfkhexb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x0c,0x44,0xea]
+#CHECK: wfkhexb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x0c,0x48,0xea]
+#CHECK: wfkhexb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x0c,0x4a,0xea]
+
+ wfkhexb %v0, %v0, %v0
+ wfkhexb %v0, %v0, %v31
+ wfkhexb %v0, %v31, %v0
+ wfkhexb %v31, %v0, %v0
+ wfkhexb %v18, %v3, %v20
+
+#CHECK: wfkhexbs %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x1c,0x40,0xea]
+#CHECK: wfkhexbs %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x1c,0x42,0xea]
+#CHECK: wfkhexbs %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x1c,0x44,0xea]
+#CHECK: wfkhexbs %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x1c,0x48,0xea]
+#CHECK: wfkhexbs %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x1c,0x4a,0xea]
+
+ wfkhexbs %v0, %v0, %v0
+ wfkhexbs %v0, %v0, %v31
+ wfkhexbs %v0, %v31, %v0
+ wfkhexbs %v31, %v0, %v0
+ wfkhexbs %v18, %v3, %v20
+
+#CHECK: wfpsosb %f0, %f0, 3 # encoding: [0xe7,0x00,0x00,0x38,0x20,0xcc]
+#CHECK: wfpsosb %f0, %f0, 3 # encoding: [0xe7,0x00,0x00,0x38,0x20,0xcc]
+#CHECK: wfpsosb %f0, %f0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x20,0xcc]
+#CHECK: wfpsosb %f0, %f15, 3 # encoding: [0xe7,0x0f,0x00,0x38,0x20,0xcc]
+#CHECK: wfpsosb %f0, %v31, 3 # encoding: [0xe7,0x0f,0x00,0x38,0x24,0xcc]
+#CHECK: wfpsosb %f15, %f0, 3 # encoding: [0xe7,0xf0,0x00,0x38,0x20,0xcc]
+#CHECK: wfpsosb %v31, %f0, 3 # encoding: [0xe7,0xf0,0x00,0x38,0x28,0xcc]
+#CHECK: wfpsosb %f14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x78,0x24,0xcc]
+
+ wfpsosb %v0, %v0, 3
+ wfpsosb %f0, %f0, 3
+ wfpsosb %v0, %v0, 15
+ wfpsosb %v0, %v15, 3
+ wfpsosb %v0, %v31, 3
+ wfpsosb %v15, %v0, 3
+ wfpsosb %v31, %v0, 3
+ wfpsosb %v14, %v17, 7
+
+#CHECK: wfpsoxb %v0, %v0, 3 # encoding: [0xe7,0x00,0x00,0x38,0x40,0xcc]
+#CHECK: wfpsoxb %v0, %v0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x40,0xcc]
+#CHECK: wfpsoxb %v0, %v15, 3 # encoding: [0xe7,0x0f,0x00,0x38,0x40,0xcc]
+#CHECK: wfpsoxb %v0, %v31, 3 # encoding: [0xe7,0x0f,0x00,0x38,0x44,0xcc]
+#CHECK: wfpsoxb %v15, %v0, 3 # encoding: [0xe7,0xf0,0x00,0x38,0x40,0xcc]
+#CHECK: wfpsoxb %v31, %v0, 3 # encoding: [0xe7,0xf0,0x00,0x38,0x48,0xcc]
+#CHECK: wfpsoxb %v14, %v17, 7 # encoding: [0xe7,0xe1,0x00,0x78,0x44,0xcc]
+
+ wfpsoxb %v0, %v0, 3
+ wfpsoxb %v0, %v0, 15
+ wfpsoxb %v0, %v15, 3
+ wfpsoxb %v0, %v31, 3
+ wfpsoxb %v15, %v0, 3
+ wfpsoxb %v31, %v0, 3
+ wfpsoxb %v14, %v17, 7
+
+#CHECK: wflcsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xcc]
+#CHECK: wflcsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xcc]
+#CHECK: wflcsb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x08,0x20,0xcc]
+#CHECK: wflcsb %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xcc]
+#CHECK: wflcsb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x20,0xcc]
+#CHECK: wflcsb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xcc]
+#CHECK: wflcsb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x24,0xcc]
+
+ wflcsb %v0, %v0
+ wflcsb %f0, %f0
+ wflcsb %v0, %v15
+ wflcsb %v0, %v31
+ wflcsb %v15, %v0
+ wflcsb %v31, %v0
+ wflcsb %v14, %v17
+
+#CHECK: wflcxb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xcc]
+#CHECK: wflcxb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x08,0x40,0xcc]
+#CHECK: wflcxb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xcc]
+#CHECK: wflcxb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x40,0xcc]
+#CHECK: wflcxb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xcc]
+#CHECK: wflcxb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x44,0xcc]
+
+ wflcxb %v0, %v0
+ wflcxb %v0, %v15
+ wflcxb %v0, %v31
+ wflcxb %v15, %v0
+ wflcxb %v31, %v0
+ wflcxb %v14, %v17
+
+#CHECK: wflnsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xcc]
+#CHECK: wflnsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x18,0x20,0xcc]
+#CHECK: wflnsb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x18,0x20,0xcc]
+#CHECK: wflnsb %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x18,0x24,0xcc]
+#CHECK: wflnsb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x18,0x20,0xcc]
+#CHECK: wflnsb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x18,0x28,0xcc]
+#CHECK: wflnsb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x18,0x24,0xcc]
+
+ wflnsb %v0, %v0
+ wflnsb %f0, %f0
+ wflnsb %v0, %v15
+ wflnsb %v0, %v31
+ wflnsb %v15, %v0
+ wflnsb %v31, %v0
+ wflnsb %v14, %v17
+
+#CHECK: wflnxb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x18,0x40,0xcc]
+#CHECK: wflnxb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x18,0x40,0xcc]
+#CHECK: wflnxb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x18,0x44,0xcc]
+#CHECK: wflnxb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x18,0x40,0xcc]
+#CHECK: wflnxb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x18,0x48,0xcc]
+#CHECK: wflnxb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x18,0x44,0xcc]
+
+ wflnxb %v0, %v0
+ wflnxb %v0, %v15
+ wflnxb %v0, %v31
+ wflnxb %v15, %v0
+ wflnxb %v31, %v0
+ wflnxb %v14, %v17
+
+#CHECK: wflpsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x28,0x20,0xcc]
+#CHECK: wflpsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x28,0x20,0xcc]
+#CHECK: wflpsb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x28,0x20,0xcc]
+#CHECK: wflpsb %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x28,0x24,0xcc]
+#CHECK: wflpsb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x28,0x20,0xcc]
+#CHECK: wflpsb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x28,0x28,0xcc]
+#CHECK: wflpsb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x28,0x24,0xcc]
+
+ wflpsb %v0, %v0
+ wflpsb %f0, %f0
+ wflpsb %v0, %v15
+ wflpsb %v0, %v31
+ wflpsb %v15, %v0
+ wflpsb %v31, %v0
+ wflpsb %v14, %v17
+
+#CHECK: wflpxb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x28,0x40,0xcc]
+#CHECK: wflpxb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x28,0x40,0xcc]
+#CHECK: wflpxb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x28,0x44,0xcc]
+#CHECK: wflpxb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x28,0x40,0xcc]
+#CHECK: wflpxb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x28,0x48,0xcc]
+#CHECK: wflpxb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x28,0x44,0xcc]
+
+ wflpxb %v0, %v0
+ wflpxb %v0, %v15
+ wflpxb %v0, %v31
+ wflpxb %v15, %v0
+ wflpxb %v31, %v0
+ wflpxb %v14, %v17
+
+#CHECK: wflls %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xc4]
+#CHECK: wflls %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xc4]
+#CHECK: wflls %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x08,0x20,0xc4]
+#CHECK: wflls %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xc4]
+#CHECK: wflls %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x20,0xc4]
+#CHECK: wflls %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xc4]
+#CHECK: wflls %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x24,0xc4]
+
+ wflls %v0, %v0
+ wflls %f0, %f0
+ wflls %v0, %v15
+ wflls %v0, %v31
+ wflls %v15, %v0
+ wflls %v31, %v0
+ wflls %v14, %v17
+
+#CHECK: wflld %v0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc4]
+#CHECK: wflld %v0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc4]
+#CHECK: wflld %v0, %f15 # encoding: [0xe7,0x0f,0x00,0x08,0x30,0xc4]
+#CHECK: wflld %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xc4]
+#CHECK: wflld %v15, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x30,0xc4]
+#CHECK: wflld %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xc4]
+#CHECK: wflld %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x34,0xc4]
+
+ wflld %v0, %v0
+ wflld %v0, %f0
+ wflld %v0, %v15
+ wflld %v0, %v31
+ wflld %v15, %v0
+ wflld %v31, %v0
+ wflld %v14, %v17
+
+#CHECK: wflrd %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc5]
+#CHECK: wflrd %f0, %f0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xc5]
+#CHECK: wflrd %f0, %f0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x30,0xc5]
+#CHECK: wflrd %f0, %f0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc5]
+#CHECK: wflrd %f0, %f0, 12, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x30,0xc5]
+#CHECK: wflrd %f0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xc5]
+#CHECK: wflrd %v31, %f0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xc5]
+#CHECK: wflrd %f14, %v17, 4, 10 # encoding: [0xe7,0xe1,0x00,0xac,0x34,0xc5]
+
+ wflrd %v0, %v0, 0, 0
+ wflrd %f0, %f0, 0, 0
+ wflrd %v0, %v0, 0, 15
+ wflrd %v0, %v0, 4, 0
+ wflrd %v0, %v0, 12, 0
+ wflrd %v0, %v31, 0, 0
+ wflrd %v31, %v0, 0, 0
+ wflrd %v14, %v17, 4, 10
+
+#CHECK: wflrx %f0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xc5]
+#CHECK: wflrx %f0, %v0, 0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xc5]
+#CHECK: wflrx %f0, %v0, 0, 15 # encoding: [0xe7,0x00,0x00,0xf8,0x40,0xc5]
+#CHECK: wflrx %f0, %v0, 4, 0 # encoding: [0xe7,0x00,0x00,0x0c,0x40,0xc5]
+#CHECK: wflrx %f0, %v0, 7, 0 # encoding: [0xe7,0x00,0x00,0x0f,0x40,0xc5]
+#CHECK: wflrx %f0, %v31, 0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xc5]
+#CHECK: wflrx %v31, %v0, 0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xc5]
+#CHECK: wflrx %f14, %v17, 4, 10 # encoding: [0xe7,0xe1,0x00,0xac,0x44,0xc5]
+
+ wflrx %v0, %v0, 0, 0
+ wflrx %f0, %v0, 0, 0
+ wflrx %v0, %v0, 0, 15
+ wflrx %v0, %v0, 4, 0
+ wflrx %v0, %v0, 7, 0
+ wflrx %v0, %v31, 0, 0
+ wflrx %v31, %v0, 0, 0
+ wflrx %v14, %v17, 4, 10
+
+#CHECK: wfmaxdb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xef]
+#CHECK: wfmaxdb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xef]
+#CHECK: wfmaxdb %f0, %f0, %f0, 4 # encoding: [0xe7,0x00,0x00,0x48,0x30,0xef]
+#CHECK: wfmaxdb %f0, %f0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x08,0x32,0xef]
+#CHECK: wfmaxdb %f0, %v31, %f0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xef]
+#CHECK: wfmaxdb %v31, %f0, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xef]
+#CHECK: wfmaxdb %v18, %f3, %v20, 11 # encoding: [0xe7,0x23,0x40,0xb8,0x3a,0xef]
+
+ wfmaxdb %v0, %v0, %v0, 0
+ wfmaxdb %f0, %f0, %f0, 0
+ wfmaxdb %v0, %v0, %v0, 4
+ wfmaxdb %v0, %v0, %v31, 0
+ wfmaxdb %v0, %v31, %v0, 0
+ wfmaxdb %v31, %v0, %v0, 0
+ wfmaxdb %v18, %v3, %v20, 11
+
+#CHECK: wfmaxsb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xef]
+#CHECK: wfmaxsb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xef]
+#CHECK: wfmaxsb %f0, %f0, %f0, 4 # encoding: [0xe7,0x00,0x00,0x48,0x20,0xef]
+#CHECK: wfmaxsb %f0, %f0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xef]
+#CHECK: wfmaxsb %f0, %v31, %f0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xef]
+#CHECK: wfmaxsb %v31, %f0, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xef]
+#CHECK: wfmaxsb %v18, %f3, %v20, 11 # encoding: [0xe7,0x23,0x40,0xb8,0x2a,0xef]
+
+ wfmaxsb %v0, %v0, %v0, 0
+ wfmaxsb %f0, %f0, %f0, 0
+ wfmaxsb %v0, %v0, %v0, 4
+ wfmaxsb %v0, %v0, %v31, 0
+ wfmaxsb %v0, %v31, %v0, 0
+ wfmaxsb %v31, %v0, %v0, 0
+ wfmaxsb %v18, %v3, %v20, 11
+
+#CHECK: wfmaxxb %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xef]
+#CHECK: wfmaxxb %v0, %v0, %v0, 4 # encoding: [0xe7,0x00,0x00,0x48,0x40,0xef]
+#CHECK: wfmaxxb %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xef]
+#CHECK: wfmaxxb %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xef]
+#CHECK: wfmaxxb %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xef]
+#CHECK: wfmaxxb %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0xb8,0x4a,0xef]
+
+ wfmaxxb %v0, %v0, %v0, 0
+ wfmaxxb %v0, %v0, %v0, 4
+ wfmaxxb %v0, %v0, %v31, 0
+ wfmaxxb %v0, %v31, %v0, 0
+ wfmaxxb %v31, %v0, %v0, 0
+ wfmaxxb %v18, %v3, %v20, 11
+
+#CHECK: wfmindb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xee]
+#CHECK: wfmindb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x30,0xee]
+#CHECK: wfmindb %f0, %f0, %f0, 4 # encoding: [0xe7,0x00,0x00,0x48,0x30,0xee]
+#CHECK: wfmindb %f0, %f0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x08,0x32,0xee]
+#CHECK: wfmindb %f0, %v31, %f0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x34,0xee]
+#CHECK: wfmindb %v31, %f0, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x38,0xee]
+#CHECK: wfmindb %v18, %f3, %v20, 11 # encoding: [0xe7,0x23,0x40,0xb8,0x3a,0xee]
+
+ wfmindb %v0, %v0, %v0, 0
+ wfmindb %f0, %f0, %f0, 0
+ wfmindb %v0, %v0, %v0, 4
+ wfmindb %v0, %v0, %v31, 0
+ wfmindb %v0, %v31, %v0, 0
+ wfmindb %v31, %v0, %v0, 0
+ wfmindb %v18, %v3, %v20, 11
+
+#CHECK: wfminsb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xee]
+#CHECK: wfminsb %f0, %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xee]
+#CHECK: wfminsb %f0, %f0, %f0, 4 # encoding: [0xe7,0x00,0x00,0x48,0x20,0xee]
+#CHECK: wfminsb %f0, %f0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xee]
+#CHECK: wfminsb %f0, %v31, %f0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xee]
+#CHECK: wfminsb %v31, %f0, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xee]
+#CHECK: wfminsb %v18, %f3, %v20, 11 # encoding: [0xe7,0x23,0x40,0xb8,0x2a,0xee]
+
+ wfminsb %v0, %v0, %v0, 0
+ wfminsb %f0, %f0, %f0, 0
+ wfminsb %v0, %v0, %v0, 4
+ wfminsb %v0, %v0, %v31, 0
+ wfminsb %v0, %v31, %v0, 0
+ wfminsb %v31, %v0, %v0, 0
+ wfminsb %v18, %v3, %v20, 11
+
+#CHECK: wfminxb %v0, %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xee]
+#CHECK: wfminxb %v0, %v0, %v0, 4 # encoding: [0xe7,0x00,0x00,0x48,0x40,0xee]
+#CHECK: wfminxb %v0, %v0, %v31, 0 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xee]
+#CHECK: wfminxb %v0, %v31, %v0, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xee]
+#CHECK: wfminxb %v31, %v0, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xee]
+#CHECK: wfminxb %v18, %v3, %v20, 11 # encoding: [0xe7,0x23,0x40,0xb8,0x4a,0xee]
+
+ wfminxb %v0, %v0, %v0, 0
+ wfminxb %v0, %v0, %v0, 4
+ wfminxb %v0, %v0, %v31, 0
+ wfminxb %v0, %v31, %v0, 0
+ wfminxb %v31, %v0, %v0, 0
+ wfminxb %v18, %v3, %v20, 11
+
+#CHECK: wfmasb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x8f]
+#CHECK: wfmasb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x8f]
+#CHECK: wfmasb %f0, %f0, %f0, %v31 # encoding: [0xe7,0x00,0x02,0x08,0xf1,0x8f]
+#CHECK: wfmasb %f0, %f0, %v31, %f0 # encoding: [0xe7,0x00,0xf2,0x08,0x02,0x8f]
+#CHECK: wfmasb %f0, %v31, %f0, %f0 # encoding: [0xe7,0x0f,0x02,0x08,0x04,0x8f]
+#CHECK: wfmasb %v31, %f0, %f0, %f0 # encoding: [0xe7,0xf0,0x02,0x08,0x08,0x8f]
+#CHECK: wfmasb %f13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x08,0x97,0x8f]
+
+ wfmasb %v0, %v0, %v0, %v0
+ wfmasb %f0, %f0, %f0, %f0
+ wfmasb %v0, %v0, %v0, %v31
+ wfmasb %v0, %v0, %v31, %v0
+ wfmasb %v0, %v31, %v0, %v0
+ wfmasb %v31, %v0, %v0, %v0
+ wfmasb %v13, %v17, %v21, %v25
+
+#CHECK: wfmaxb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x04,0x08,0x00,0x8f]
+#CHECK: wfmaxb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x04,0x08,0xf1,0x8f]
+#CHECK: wfmaxb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf4,0x08,0x02,0x8f]
+#CHECK: wfmaxb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x04,0x08,0x04,0x8f]
+#CHECK: wfmaxb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x04,0x08,0x08,0x8f]
+#CHECK: wfmaxb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x54,0x08,0x97,0x8f]
+
+ wfmaxb %v0, %v0, %v0, %v0
+ wfmaxb %v0, %v0, %v0, %v31
+ wfmaxb %v0, %v0, %v31, %v0
+ wfmaxb %v0, %v31, %v0, %v0
+ wfmaxb %v31, %v0, %v0, %v0
+ wfmaxb %v13, %v17, %v21, %v25
+
+#CHECK: wfmsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe7]
+#CHECK: wfmsb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe7]
+#CHECK: wfmsb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xe7]
+#CHECK: wfmsb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xe7]
+#CHECK: wfmsb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xe7]
+#CHECK: wfmsb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xe7]
+
+ wfmsb %v0, %v0, %v0
+ wfmsb %f0, %f0, %f0
+ wfmsb %v0, %v0, %v31
+ wfmsb %v0, %v31, %v0
+ wfmsb %v31, %v0, %v0
+ wfmsb %v18, %v3, %v20
+
+#CHECK: wfmxb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xe7]
+#CHECK: wfmxb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xe7]
+#CHECK: wfmxb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xe7]
+#CHECK: wfmxb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xe7]
+#CHECK: wfmxb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xe7]
+
+ wfmxb %v0, %v0, %v0
+ wfmxb %v0, %v0, %v31
+ wfmxb %v0, %v31, %v0
+ wfmxb %v31, %v0, %v0
+ wfmxb %v18, %v3, %v20
+
+#CHECK: wfmssb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x8e]
+#CHECK: wfmssb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x8e]
+#CHECK: wfmssb %f0, %f0, %f0, %v31 # encoding: [0xe7,0x00,0x02,0x08,0xf1,0x8e]
+#CHECK: wfmssb %f0, %f0, %v31, %f0 # encoding: [0xe7,0x00,0xf2,0x08,0x02,0x8e]
+#CHECK: wfmssb %f0, %v31, %f0, %f0 # encoding: [0xe7,0x0f,0x02,0x08,0x04,0x8e]
+#CHECK: wfmssb %v31, %f0, %f0, %f0 # encoding: [0xe7,0xf0,0x02,0x08,0x08,0x8e]
+#CHECK: wfmssb %f13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x08,0x97,0x8e]
+
+ wfmssb %v0, %v0, %v0, %v0
+ wfmssb %f0, %f0, %f0, %f0
+ wfmssb %v0, %v0, %v0, %v31
+ wfmssb %v0, %v0, %v31, %v0
+ wfmssb %v0, %v31, %v0, %v0
+ wfmssb %v31, %v0, %v0, %v0
+ wfmssb %v13, %v17, %v21, %v25
+
+#CHECK: wfmsxb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x04,0x08,0x00,0x8e]
+#CHECK: wfmsxb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x04,0x08,0xf1,0x8e]
+#CHECK: wfmsxb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf4,0x08,0x02,0x8e]
+#CHECK: wfmsxb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x04,0x08,0x04,0x8e]
+#CHECK: wfmsxb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x04,0x08,0x08,0x8e]
+#CHECK: wfmsxb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x54,0x08,0x97,0x8e]
+
+ wfmsxb %v0, %v0, %v0, %v0
+ wfmsxb %v0, %v0, %v0, %v31
+ wfmsxb %v0, %v0, %v31, %v0
+ wfmsxb %v0, %v31, %v0, %v0
+ wfmsxb %v31, %v0, %v0, %v0
+ wfmsxb %v13, %v17, %v21, %v25
+
+#CHECK: wfnmadb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x03,0x08,0x00,0x9f]
+#CHECK: wfnmadb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x03,0x08,0x00,0x9f]
+#CHECK: wfnmadb %f0, %f0, %f0, %v31 # encoding: [0xe7,0x00,0x03,0x08,0xf1,0x9f]
+#CHECK: wfnmadb %f0, %f0, %v31, %f0 # encoding: [0xe7,0x00,0xf3,0x08,0x02,0x9f]
+#CHECK: wfnmadb %f0, %v31, %f0, %f0 # encoding: [0xe7,0x0f,0x03,0x08,0x04,0x9f]
+#CHECK: wfnmadb %v31, %f0, %f0, %f0 # encoding: [0xe7,0xf0,0x03,0x08,0x08,0x9f]
+#CHECK: wfnmadb %f13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x53,0x08,0x97,0x9f]
+
+ wfnmadb %v0, %v0, %v0, %v0
+ wfnmadb %f0, %f0, %f0, %f0
+ wfnmadb %v0, %v0, %v0, %v31
+ wfnmadb %v0, %v0, %v31, %v0
+ wfnmadb %v0, %v31, %v0, %v0
+ wfnmadb %v31, %v0, %v0, %v0
+ wfnmadb %v13, %v17, %v21, %v25
+
+#CHECK: wfnmasb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x9f]
+#CHECK: wfnmasb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x9f]
+#CHECK: wfnmasb %f0, %f0, %f0, %v31 # encoding: [0xe7,0x00,0x02,0x08,0xf1,0x9f]
+#CHECK: wfnmasb %f0, %f0, %v31, %f0 # encoding: [0xe7,0x00,0xf2,0x08,0x02,0x9f]
+#CHECK: wfnmasb %f0, %v31, %f0, %f0 # encoding: [0xe7,0x0f,0x02,0x08,0x04,0x9f]
+#CHECK: wfnmasb %v31, %f0, %f0, %f0 # encoding: [0xe7,0xf0,0x02,0x08,0x08,0x9f]
+#CHECK: wfnmasb %f13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x08,0x97,0x9f]
+
+ wfnmasb %v0, %v0, %v0, %v0
+ wfnmasb %f0, %f0, %f0, %f0
+ wfnmasb %v0, %v0, %v0, %v31
+ wfnmasb %v0, %v0, %v31, %v0
+ wfnmasb %v0, %v31, %v0, %v0
+ wfnmasb %v31, %v0, %v0, %v0
+ wfnmasb %v13, %v17, %v21, %v25
+
+#CHECK: wfnmaxb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x04,0x08,0x00,0x9f]
+#CHECK: wfnmaxb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x04,0x08,0xf1,0x9f]
+#CHECK: wfnmaxb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf4,0x08,0x02,0x9f]
+#CHECK: wfnmaxb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x04,0x08,0x04,0x9f]
+#CHECK: wfnmaxb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x04,0x08,0x08,0x9f]
+#CHECK: wfnmaxb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x54,0x08,0x97,0x9f]
+
+ wfnmaxb %v0, %v0, %v0, %v0
+ wfnmaxb %v0, %v0, %v0, %v31
+ wfnmaxb %v0, %v0, %v31, %v0
+ wfnmaxb %v0, %v31, %v0, %v0
+ wfnmaxb %v31, %v0, %v0, %v0
+ wfnmaxb %v13, %v17, %v21, %v25
+
+#CHECK: wfnmsdb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x03,0x08,0x00,0x9e]
+#CHECK: wfnmsdb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x03,0x08,0x00,0x9e]
+#CHECK: wfnmsdb %f0, %f0, %f0, %v31 # encoding: [0xe7,0x00,0x03,0x08,0xf1,0x9e]
+#CHECK: wfnmsdb %f0, %f0, %v31, %f0 # encoding: [0xe7,0x00,0xf3,0x08,0x02,0x9e]
+#CHECK: wfnmsdb %f0, %v31, %f0, %f0 # encoding: [0xe7,0x0f,0x03,0x08,0x04,0x9e]
+#CHECK: wfnmsdb %v31, %f0, %f0, %f0 # encoding: [0xe7,0xf0,0x03,0x08,0x08,0x9e]
+#CHECK: wfnmsdb %f13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x53,0x08,0x97,0x9e]
+
+ wfnmsdb %v0, %v0, %v0, %v0
+ wfnmsdb %f0, %f0, %f0, %f0
+ wfnmsdb %v0, %v0, %v0, %v31
+ wfnmsdb %v0, %v0, %v31, %v0
+ wfnmsdb %v0, %v31, %v0, %v0
+ wfnmsdb %v31, %v0, %v0, %v0
+ wfnmsdb %v13, %v17, %v21, %v25
+
+#CHECK: wfnmssb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x9e]
+#CHECK: wfnmssb %f0, %f0, %f0, %f0 # encoding: [0xe7,0x00,0x02,0x08,0x00,0x9e]
+#CHECK: wfnmssb %f0, %f0, %f0, %v31 # encoding: [0xe7,0x00,0x02,0x08,0xf1,0x9e]
+#CHECK: wfnmssb %f0, %f0, %v31, %f0 # encoding: [0xe7,0x00,0xf2,0x08,0x02,0x9e]
+#CHECK: wfnmssb %f0, %v31, %f0, %f0 # encoding: [0xe7,0x0f,0x02,0x08,0x04,0x9e]
+#CHECK: wfnmssb %v31, %f0, %f0, %f0 # encoding: [0xe7,0xf0,0x02,0x08,0x08,0x9e]
+#CHECK: wfnmssb %f13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x52,0x08,0x97,0x9e]
+
+ wfnmssb %v0, %v0, %v0, %v0
+ wfnmssb %f0, %f0, %f0, %f0
+ wfnmssb %v0, %v0, %v0, %v31
+ wfnmssb %v0, %v0, %v31, %v0
+ wfnmssb %v0, %v31, %v0, %v0
+ wfnmssb %v31, %v0, %v0, %v0
+ wfnmssb %v13, %v17, %v21, %v25
+
+#CHECK: wfnmsxb %v0, %v0, %v0, %v0 # encoding: [0xe7,0x00,0x04,0x08,0x00,0x9e]
+#CHECK: wfnmsxb %v0, %v0, %v0, %v31 # encoding: [0xe7,0x00,0x04,0x08,0xf1,0x9e]
+#CHECK: wfnmsxb %v0, %v0, %v31, %v0 # encoding: [0xe7,0x00,0xf4,0x08,0x02,0x9e]
+#CHECK: wfnmsxb %v0, %v31, %v0, %v0 # encoding: [0xe7,0x0f,0x04,0x08,0x04,0x9e]
+#CHECK: wfnmsxb %v31, %v0, %v0, %v0 # encoding: [0xe7,0xf0,0x04,0x08,0x08,0x9e]
+#CHECK: wfnmsxb %v13, %v17, %v21, %v25 # encoding: [0xe7,0xd1,0x54,0x08,0x97,0x9e]
+
+ wfnmsxb %v0, %v0, %v0, %v0
+ wfnmsxb %v0, %v0, %v0, %v31
+ wfnmsxb %v0, %v0, %v31, %v0
+ wfnmsxb %v0, %v31, %v0, %v0
+ wfnmsxb %v31, %v0, %v0, %v0
+ wfnmsxb %v13, %v17, %v21, %v25
+
+#CHECK: wfssb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe2]
+#CHECK: wfssb %f0, %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xe2]
+#CHECK: wfssb %f0, %f0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x22,0xe2]
+#CHECK: wfssb %f0, %v31, %f0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xe2]
+#CHECK: wfssb %v31, %f0, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xe2]
+#CHECK: wfssb %v18, %f3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x2a,0xe2]
+
+ wfssb %v0, %v0, %v0
+ wfssb %f0, %f0, %f0
+ wfssb %v0, %v0, %v31
+ wfssb %v0, %v31, %v0
+ wfssb %v31, %v0, %v0
+ wfssb %v18, %v3, %v20
+
+#CHECK: wfsxb %v0, %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xe2]
+#CHECK: wfsxb %v0, %v0, %v31 # encoding: [0xe7,0x00,0xf0,0x08,0x42,0xe2]
+#CHECK: wfsxb %v0, %v31, %v0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xe2]
+#CHECK: wfsxb %v31, %v0, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xe2]
+#CHECK: wfsxb %v18, %v3, %v20 # encoding: [0xe7,0x23,0x40,0x08,0x4a,0xe2]
+
+ wfsxb %v0, %v0, %v0
+ wfsxb %v0, %v0, %v31
+ wfsxb %v0, %v31, %v0
+ wfsxb %v31, %v0, %v0
+ wfsxb %v18, %v3, %v20
+
+#CHECK: wfsqsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xce]
+#CHECK: wfsqsb %f0, %f0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0xce]
+#CHECK: wfsqsb %f0, %f15 # encoding: [0xe7,0x0f,0x00,0x08,0x20,0xce]
+#CHECK: wfsqsb %f0, %v31 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0xce]
+#CHECK: wfsqsb %f15, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x20,0xce]
+#CHECK: wfsqsb %v31, %f0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0xce]
+#CHECK: wfsqsb %f14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x24,0xce]
+
+ wfsqsb %v0, %v0
+ wfsqsb %f0, %f0
+ wfsqsb %v0, %v15
+ wfsqsb %v0, %v31
+ wfsqsb %v15, %v0
+ wfsqsb %v31, %v0
+ wfsqsb %v14, %v17
+
+#CHECK: wfsqxb %v0, %v0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0xce]
+#CHECK: wfsqxb %v0, %v15 # encoding: [0xe7,0x0f,0x00,0x08,0x40,0xce]
+#CHECK: wfsqxb %v0, %v31 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0xce]
+#CHECK: wfsqxb %v15, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x40,0xce]
+#CHECK: wfsqxb %v31, %v0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0xce]
+#CHECK: wfsqxb %v14, %v17 # encoding: [0xe7,0xe1,0x00,0x08,0x44,0xce]
+
+ wfsqxb %v0, %v0
+ wfsqxb %v0, %v15
+ wfsqxb %v0, %v31
+ wfsqxb %v15, %v0
+ wfsqxb %v31, %v0
+ wfsqxb %v14, %v17
+
+#CHECK: wftcisb %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0x4a]
+#CHECK: wftcisb %f0, %f0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x20,0x4a]
+#CHECK: wftcisb %f0, %f0, 4095 # encoding: [0xe7,0x00,0xff,0xf8,0x20,0x4a]
+#CHECK: wftcisb %f0, %f15, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x20,0x4a]
+#CHECK: wftcisb %f0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x24,0x4a]
+#CHECK: wftcisb %f15, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x20,0x4a]
+#CHECK: wftcisb %v31, %f0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x28,0x4a]
+#CHECK: wftcisb %f4, %v21, 1656 # encoding: [0xe7,0x45,0x67,0x88,0x24,0x4a]
+
+ wftcisb %v0, %v0, 0
+ wftcisb %f0, %f0, 0
+ wftcisb %v0, %v0, 4095
+ wftcisb %v0, %v15, 0
+ wftcisb %v0, %v31, 0
+ wftcisb %v15, %v0, 0
+ wftcisb %v31, %v0, 0
+ wftcisb %v4, %v21, 0x678
+
+#CHECK: wftcixb %v0, %v0, 0 # encoding: [0xe7,0x00,0x00,0x08,0x40,0x4a]
+#CHECK: wftcixb %v0, %v0, 4095 # encoding: [0xe7,0x00,0xff,0xf8,0x40,0x4a]
+#CHECK: wftcixb %v0, %v15, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x40,0x4a]
+#CHECK: wftcixb %v0, %v31, 0 # encoding: [0xe7,0x0f,0x00,0x08,0x44,0x4a]
+#CHECK: wftcixb %v15, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x40,0x4a]
+#CHECK: wftcixb %v31, %v0, 0 # encoding: [0xe7,0xf0,0x00,0x08,0x48,0x4a]
+#CHECK: wftcixb %v4, %v21, 1656 # encoding: [0xe7,0x45,0x67,0x88,0x44,0x4a]
+
+ wftcixb %v0, %v0, 0
+ wftcixb %v0, %v0, 4095
+ wftcixb %v0, %v15, 0
+ wftcixb %v0, %v31, 0
+ wftcixb %v15, %v0, 0
+ wftcixb %v31, %v0, 0
+ wftcixb %v4, %v21, 0x678
+
diff --git a/test/MC/SystemZ/invalid-instructions-spellcheck.s b/test/MC/SystemZ/invalid-instructions-spellcheck.s
new file mode 100644
index 0000000000000..e77b99d9a3875
--- /dev/null
+++ b/test/MC/SystemZ/invalid-instructions-spellcheck.s
@@ -0,0 +1,66 @@
+# RUN: not llvm-mc -triple=systemz -mcpu=z13 -show-encoding < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple=systemz -mcpu=zEC12 -show-encoding < %s 2>&1 | FileCheck %s --check-prefix=CHECK-ZEC12
+
+# This tests the mnemonic spell checker.
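+# When a mnemonic fails to parse, the assembler compares it against the
+# mnemonics valid for the selected CPU and suggests close matches; judging by
+# the cases below, candidates within roughly two edits are offered.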
+
+# First check what happens when an instruction is omitted:
+
+ %r1, %r2, %r3
+
+# CHECK: error: unexpected token at start of statement
+# CHECK-NEXT: %r1, %r2, %r3
+# CHECK-NEXT: ^
+
+# We don't want to see a suggestion here; the edit distance to any real
+# mnemonic is too large for a sensible candidate:
+
+ aaaaaaaaaaaaaaa %r1, %r2, %r3
+
+# CHECK: error: invalid instruction
+# CHECK-NEXT: aaaaaaaaaaaaaaa %r1, %r2, %r3
+# CHECK-NEXT: ^
+
+# Check that we get one suggestion: 'cpdt' is 1 edit away, i.e. a deletion.
+
+ cpdtX %r1, 0(4, %r15), 0
+
+# CHECK: error: invalid instruction, did you mean: cpdt?
+# CHECK-NEXT: cpdtX %r1, 0(4, %r15), 0
+# CHECK-NEXT: ^
+
+# Check edit distances 1 and 2:
+
+ ltTr %r1, %r2
+
+# CHECK: error: invalid instruction, did you mean: lr, lt, ltdr, ltdtr, lter, ltgr, ltr, ltxr, ltxtr, tr, trtr?
+# CHECK-NEXT: ltTr %r1, %r2
+# CHECK-NEXT: ^
+
+# Check edit distances 1 and 2, just insertions:
+
+ begin 0, 65292
+
+# CHECK: error: invalid instruction, did you mean: tbegin, tbeginc?
+# CHECK-NEXT: begin 0, 65292
+# CHECK-NEXT: ^
+
+# Check an instruction that is 2 edits away, and also has a lot of candidates:
+
+ adt %r1, 244(%r15)
+
+# CHECK: error: invalid instruction, did you mean: a, ad, adb, adr, adtr, adtra, d, lat, mad, qadtr?
+# CHECK-NEXT: adt %r1, 244(%r15)
+# CHECK-NEXT: ^
+
+# Check that we don't suggest instructions that are not supported by the
+# selected CPU. For example, in pre-z13 mode we don't want to see suggestions
+# for vector instructions.
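+# (vlvgp belongs to the vector facility introduced with z13, so with
+# -mcpu=zEC12 it must not appear among the candidates.)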
+
+ vlvggp %v1, %r2, %r3
+
+# CHECK-ZEC12: error: invalid instruction
+# CHECK-ZEC12: vlvggp
+# CHECK-ZEC12: ^
+
+# CHECK: error: invalid instruction, did you mean: vlvg, vlvgg, vlvgp?
+# CHECK-NEXT: vlvggp %v1, %r2, %r3
+# CHECK-NEXT: ^
diff --git a/test/MC/X86/pr22028.s b/test/MC/X86/pr22028.s
index 6ce7da07488bf..d82b50f051a11 100644
--- a/test/MC/X86/pr22028.s
+++ b/test/MC/X86/pr22028.s
@@ -1,6 +1,6 @@
// RUN: llvm-mc -triple i386-unknown-unknown-code16 -show-encoding %s | FileCheck --check-prefix=CHECK16 %s
-// RUN: llvm-mc -triple i386-unknown-unknown -show-encoding %s | FileCheck --check-prefix=CHECK %s
-// RUN: llvm-mc -triple i686-unknown-unknown -show-encoding %s | FileCheck --check-prefix=CHECK %s
+// RUN: llvm-mc -triple i386-unknown-unknown -show-encoding %s | FileCheck %s
+// RUN: llvm-mc -triple i686-unknown-unknown -show-encoding %s | FileCheck %s
.intel_syntax
diff --git a/test/Object/no-section-table.test b/test/Object/no-section-table.test
index bd60e681b71f3..9ecde4f8c3690 100644
--- a/test/Object/no-section-table.test
+++ b/test/Object/no-section-table.test
@@ -3,7 +3,7 @@ RUN: | FileCheck %s
CHECK: DynamicSection [ (24 entries)
CHECK: Tag Type Name/Value
-CHECK: 0x0000000000000001 NEEDED SharedLibrary (libc.so.6)
+CHECK: 0x0000000000000001 NEEDED Shared library: [libc.so.6]
CHECK: 0x000000000000000C INIT 0x4B8
CHECK: 0x000000000000000D FINI 0x618
CHECK: 0x0000000000000019 INIT_ARRAY 0x2006C0
diff --git a/test/Object/readobj-shared-object.test b/test/Object/readobj-shared-object.test
index 173581e60c39b..59f5ff127cf36 100644
--- a/test/Object/readobj-shared-object.test
+++ b/test/Object/readobj-shared-object.test
@@ -302,9 +302,9 @@ ELF: ]
ELF32: DynamicSection [ (9 entries)
ELF32: Tag Type Name/Value
-ELF32: 0x00000001 NEEDED SharedLibrary (libc.so.6)
-ELF32: 0x00000001 NEEDED SharedLibrary (libm.so.6)
-ELF32: 0x0000000E SONAME LibrarySoname (libfoo.so)
+ELF32: 0x00000001 NEEDED Shared library: [libc.so.6]
+ELF32: 0x00000001 NEEDED Shared library: [libm.so.6]
+ELF32: 0x0000000E SONAME Library soname: [libfoo.so]
ELF32: 0x00000004 HASH {{[0-9a-f]+}}
ELF32: 0x00000005 STRTAB {{[0-9a-f]+}}
ELF32: 0x00000006 SYMTAB {{[0-9a-f]+}}
@@ -315,9 +315,9 @@ ELF32: ]
ELF64: DynamicSection [ (9 entries)
ELF64: Tag Type Name/Value
-ELF64: 0x0000000000000001 NEEDED SharedLibrary (libc.so.6)
-ELF64: 0x0000000000000001 NEEDED SharedLibrary (libm.so.6)
-ELF64: 0x000000000000000E SONAME LibrarySoname (libfoo.so)
+ELF64: 0x0000000000000001 NEEDED Shared library: [libc.so.6]
+ELF64: 0x0000000000000001 NEEDED Shared library: [libm.so.6]
+ELF64: 0x000000000000000E SONAME Library soname: [libfoo.so]
ELF64: 0x0000000000000004 HASH {{[0-9a-f]+}}
ELF64: 0x0000000000000005 STRTAB {{[0-9a-f]+}}
ELF64: 0x0000000000000006 SYMTAB {{[0-9a-f]+}}
diff --git a/test/ObjectYAML/CodeView/guid.yaml b/test/ObjectYAML/CodeView/guid.yaml
new file mode 100644
index 0000000000000..8d8d0142c5e35
--- /dev/null
+++ b/test/ObjectYAML/CodeView/guid.yaml
@@ -0,0 +1,59 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
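+# Round-trip an LF_TYPESERVER2 record to check that its CodeView GUID field
+# survives the COFF <-> YAML conversion unchanged.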
+
+--- !COFF
+header:
+ Machine: IMAGE_FILE_MACHINE_AMD64
+ Characteristics: [ ]
+sections:
+ - Name: '.debug$T'
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+ Alignment: 1
+ Types:
+ - Kind: LF_TYPESERVER2
+ TypeServer2:
+ Guid: '{01DF191B-22BF-6B42-96CE-5258B8329FE5}'
+ Age: 24
+ Name: 'C:\src\llvm-project\build\vc140.pdb'
+symbols:
+ - Name: '.debug$T'
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 64
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 0
+...
+
+# CHECK: --- !COFF
+# CHECK: header:
+# CHECK: Machine: IMAGE_FILE_MACHINE_AMD64
+# CHECK: Characteristics: [ ]
+# CHECK: sections:
+# CHECK: - Name: '.debug$T'
+# CHECK: Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_DISCARDABLE, IMAGE_SCN_MEM_READ ]
+# CHECK: Alignment: 1
+# CHECK: Types:
+# CHECK: - Kind: LF_TYPESERVER2
+# CHECK: TypeServer2:
+# CHECK: Guid: '{01DF191B-22BF-6B42-96CE-5258B8329FE5}'
+# CHECK: Age: 24
+# CHECK: Name: 'C:\src\llvm-project\build\vc140.pdb'
+# CHECK: symbols:
+# CHECK: - Name: '.debug$T'
+# CHECK: Value: 0
+# CHECK: SectionNumber: 1
+# CHECK: SimpleType: IMAGE_SYM_TYPE_NULL
+# CHECK: ComplexType: IMAGE_SYM_DTYPE_NULL
+# CHECK: StorageClass: IMAGE_SYM_CLASS_STATIC
+# CHECK: SectionDefinition:
+# CHECK: Length: 64
+# CHECK: NumberOfRelocations: 0
+# CHECK: NumberOfLinenumbers: 0
+# CHECK: CheckSum: 0
+# CHECK: Number: 0
+# CHECK: ...
diff --git a/test/Other/cgscc-libcall-update.ll b/test/Other/cgscc-libcall-update.ll
new file mode 100644
index 0000000000000..e0833ca092664
--- /dev/null
+++ b/test/Other/cgscc-libcall-update.ll
@@ -0,0 +1,61 @@
+; Make sure that the CGSCC pass manager can handle instcombine simplifying one
+; libcall into an unrelated libcall, and that it updates the call graph
+; accordingly.
+;
+; Also check that it can handle inlining *removing* a libcall entirely.
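+;
+; Concretely, reading the IR below: the @__strncpy_chk call folds to a direct
+; call to @strncpy, whose definition lives in this module, so a brand new call
+; edge appears; inlining then deletes the @ntohl call from @hoge entirely.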
+;
+; RUN: opt -passes='cgscc(inline,function(instcombine))' -S < %s | FileCheck %s
+
+define i8* @wibble(i8* %arg1, i8* %arg2) {
+; CHECK-LABEL: define i8* @wibble(
+bb:
+ %tmp = alloca [1024 x i8], align 16
+ %tmp2 = getelementptr inbounds [1024 x i8], [1024 x i8]* %tmp, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* %arg1, i64 1024, i32 0, i1 false)
+; CHECK: call void @llvm.memcpy
+ %tmp3 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp2, i1 false, i1 true)
+ %tmp4 = call i8* @__strncpy_chk(i8* %arg2, i8* %tmp2, i64 1023, i64 %tmp3)
+; CHECK-NOT: call
+; CHECK: call i8* @strncpy(i8* %arg2, i8* %tmp2, i64 1023)
+; CHECK-NOT: call
+
+ ret i8* %tmp4
+; CHECK: ret
+}
+
+define i8* @strncpy(i8* %arg1, i8* %arg2, i64 %size) noinline {
+bb:
+ %result = call i8* @my_special_strncpy(i8* %arg1, i8* %arg2, i64 %size)
+ ret i8* %result
+}
+
+declare i8* @my_special_strncpy(i8* %arg1, i8* %arg2, i64 %size)
+
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1)
+
+declare i8* @__strncpy_chk(i8*, i8*, i64, i64)
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+
+; Check that the call graph also stays correct when a libcall is removed
+; entirely, now that libcalls are handled specially in the call graph to
+; address the case above.
+define i32 @hoge(i32* %arg1) {
+; CHECK-LABEL: define i32 @hoge(
+bb:
+ %tmp41 = load i32*, i32** null
+ %tmp6 = load i32, i32* %arg1
+ %tmp7 = call i32 @ntohl(i32 %tmp6)
+; CHECK-NOT: call i32 @ntohl
+ ret i32 %tmp7
+; CHECK: ret i32
+}
+
+; Even though this function is no longer called, it should be retained, as it
+; may be needed when doing further libcall transformations.
+define internal i32 @ntohl(i32 %x) {
+; CHECK-LABEL: define internal i32 @ntohl(
+entry:
+ %and2 = lshr i32 %x, 8
+ %shr = and i32 %and2, 65280
+ ret i32 %shr
+}
diff --git a/test/Other/new-pass-manager.ll b/test/Other/new-pass-manager.ll
index bf8e596d118b8..35f596e779888 100644
--- a/test/Other/new-pass-manager.ll
+++ b/test/Other/new-pass-manager.ll
@@ -23,6 +23,7 @@
; CHECK-CGSCC-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*(CGSCCAnalysisManager|AnalysisManager<.*LazyCallGraph::SCC.*>).*}},{{.*}}Module>
; CHECK-CGSCC-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*(FunctionAnalysisManager|AnalysisManager<.*Function.*>).*}},{{.*}}Module>
; CHECK-CGSCC-PASS-NEXT: Running analysis: LazyCallGraphAnalysis
+; CHECK-CGSCC-PASS-NEXT: Running analysis: TargetLibraryAnalysis
; CHECK-CGSCC-PASS-NEXT: Running an SCC pass across the RefSCC: [(foo)]
; CHECK-CGSCC-PASS-NEXT: Starting CGSCC pass manager run
; CHECK-CGSCC-PASS-NEXT: Running pass: NoOpCGSCCPass
@@ -407,6 +408,7 @@
; CHECK-REPEAT-CGSCC-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*(CGSCCAnalysisManager|AnalysisManager<.*LazyCallGraph::SCC.*>).*}},{{.*}}Module>
; CHECK-REPEAT-CGSCC-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*(FunctionAnalysisManager|AnalysisManager<.*Function.*>).*}},{{.*}}Module>
; CHECK-REPEAT-CGSCC-PASS-NEXT: Running analysis: LazyCallGraphAnalysis
+; CHECK-REPEAT-CGSCC-PASS-NEXT: Running analysis: TargetLibraryAnalysis
; CHECK-REPEAT-CGSCC-PASS-NEXT: Running an SCC pass across the RefSCC: [(foo)]
; CHECK-REPEAT-CGSCC-PASS-NEXT: Starting CGSCC pass manager run
; CHECK-REPEAT-CGSCC-PASS-NEXT: Running pass: RepeatedPass
diff --git a/test/ThinLTO/X86/debuginfo-cu-import.ll b/test/ThinLTO/X86/debuginfo-cu-import.ll
index 42a751191860a..e0b066c736e4b 100644
--- a/test/ThinLTO/X86/debuginfo-cu-import.ll
+++ b/test/ThinLTO/X86/debuginfo-cu-import.ll
@@ -51,11 +51,11 @@ entry:
!9 = !DIGlobalVariableExpression(var: !10)
!10 = !DIGlobalVariable(name: "version", scope: !4, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true)
!11 = !{!12, !16}
-!12 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !4, entity: !13, line: 8)
+!12 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !4, entity: !13, file: !1, line: 8)
!13 = distinct !DISubprogram(name: "a", linkageName: "_ZN1A1aEv", scope: !4, file: !1, line: 7, type: !14, isLocal: false, isDefinition: true, scopeLine: 7, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !5)
!14 = !DISubroutineType(types: !15)
!15 = !{null}
-!16 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !17, entity: !19, line: 8)
+!16 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !17, entity: !19, file: !1, line: 8)
!17 = distinct !DILexicalBlock(scope: !18, file: !1, line: 9, column: 8)
!18 = distinct !DISubprogram(name: "c", linkageName: "_ZN1A1cEv", scope: !4, file: !1, line: 9, type: !14, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !5)
!19 = distinct !DILexicalBlock(scope: !20, file: !1, line: 10, column: 8)
diff --git a/test/Transforms/CodeGenPrepare/X86/memcmp.ll b/test/Transforms/CodeGenPrepare/X86/memcmp.ll
index 4b9e7c3956f58..1dfc087619653 100644
--- a/test/Transforms/CodeGenPrepare/X86/memcmp.ll
+++ b/test/Transforms/CodeGenPrepare/X86/memcmp.ll
@@ -23,9 +23,63 @@ define i32 @cmp2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
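+; As the generated CHECK lines below show, each expansion follows one shape:
+; compare the widest power-of-two chunk via byte-swapped loads, fall through
+; to a residual compare for the odd bytes, and merge the result in %endblock.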
define i32 @cmp3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp3(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 3)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp3(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; X32-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = zext i16 [[TMP4]] to i32
+; X32-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32
+; X32-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP6]], [[TMP7]]
+; X32-NEXT: br i1 [[TMP8]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP6]], [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[X]], i8 2
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 2
+; X32-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; X32-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
+; X32-NEXT: [[TMP16:%.*]] = zext i8 [[TMP14]] to i32
+; X32-NEXT: [[TMP17:%.*]] = sub i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP17]], [[LOADBB1]] ], [ [[TMP10]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp3(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; X64-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = zext i16 [[TMP4]] to i64
+; X64-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i64
+; X64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP6]], [[TMP7]]
+; X64-NEXT: br i1 [[TMP8]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[TMP9:%.*]] = icmp ult i64 [[TMP6]], [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[X]], i8 2
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 2
+; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
+; X64-NEXT: [[TMP16:%.*]] = zext i8 [[TMP14]] to i32
+; X64-NEXT: [[TMP17:%.*]] = sub i32 [[TMP15]], [[TMP16]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP17]], [[LOADBB1]] ], [ [[TMP10]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
ret i32 %call
@@ -50,27 +104,225 @@ define i32 @cmp4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp5(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 5)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp5(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]]
+; X32-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]]
+; X32-NEXT: [[TMP13:%.*]] = zext i8 [[TMP11]] to i32
+; X32-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
+; X32-NEXT: [[TMP15:%.*]] = sub i32 [[TMP13]], [[TMP14]]
+; X32-NEXT: br label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp5(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X64-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = zext i32 [[TMP4]] to i64
+; X64-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
+; X64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP6]], [[TMP7]]
+; X64-NEXT: br i1 [[TMP8]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[TMP9:%.*]] = icmp ult i64 [[TMP6]], [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
+; X64-NEXT: [[TMP16:%.*]] = zext i8 [[TMP14]] to i32
+; X64-NEXT: [[TMP17:%.*]] = sub i32 [[TMP15]], [[TMP16]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP17]], [[LOADBB1]] ], [ [[TMP10]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
ret i32 %call
}
define i32 @cmp6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp6(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 6)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp6(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i16, i16* [[TMP9]], i16 2
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 2
+; X32-NEXT: [[TMP13:%.*]] = load i16, i16* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]]
+; X32-NEXT: [[TMP15:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP13]])
+; X32-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
+; X32-NEXT: [[TMP17]] = zext i16 [[TMP15]] to i32
+; X32-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i32
+; X32-NEXT: [[TMP19:%.*]] = icmp eq i32 [[TMP17]], [[TMP18]]
+; X32-NEXT: br i1 [[TMP19]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp6(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X64-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = zext i32 [[TMP4]] to i64
+; X64-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
+; X64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP6]], [[TMP7]]
+; X64-NEXT: br i1 [[TMP8]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP6]], [[LOADBB:%.*]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP7]], [[LOADBB]] ], [ [[TMP20:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP9:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP13:%.*]] = getelementptr i16, i16* [[TMP11]], i16 2
+; X64-NEXT: [[TMP14:%.*]] = getelementptr i16, i16* [[TMP12]], i16 2
+; X64-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]]
+; X64-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP14]]
+; X64-NEXT: [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
+; X64-NEXT: [[TMP18:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP16]])
+; X64-NEXT: [[TMP19]] = zext i16 [[TMP17]] to i64
+; X64-NEXT: [[TMP20]] = zext i16 [[TMP18]] to i64
+; X64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[TMP19]], [[TMP20]]
+; X64-NEXT: br i1 [[TMP21]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP10]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
ret i32 %call
}
define i32 @cmp7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp7(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp7(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i16, i16* [[TMP9]], i16 2
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 2
+; X32-NEXT: [[TMP13:%.*]] = load i16, i16* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]]
+; X32-NEXT: [[TMP15:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP13]])
+; X32-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
+; X32-NEXT: [[TMP17]] = zext i16 [[TMP15]] to i32
+; X32-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i32
+; X32-NEXT: [[TMP19:%.*]] = icmp eq i32 [[TMP17]], [[TMP18]]
+; X32-NEXT: br i1 [[TMP19]], label [[LOADBB2:%.*]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 6
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[Y]], i8 6
+; X32-NEXT: [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i8, i8* [[TMP21]]
+; X32-NEXT: [[TMP24:%.*]] = zext i8 [[TMP22]] to i32
+; X32-NEXT: [[TMP25:%.*]] = zext i8 [[TMP23]] to i32
+; X32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], [[TMP25]]
+; X32-NEXT: br label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP26]], [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp7(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X64-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = zext i32 [[TMP4]] to i64
+; X64-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
+; X64-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP6]], [[TMP7]]
+; X64-NEXT: br i1 [[TMP8]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP6]], [[LOADBB:%.*]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP7]], [[LOADBB]] ], [ [[TMP20:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP9:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP13:%.*]] = getelementptr i16, i16* [[TMP11]], i16 2
+; X64-NEXT: [[TMP14:%.*]] = getelementptr i16, i16* [[TMP12]], i16 2
+; X64-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]]
+; X64-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP14]]
+; X64-NEXT: [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
+; X64-NEXT: [[TMP18:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP16]])
+; X64-NEXT: [[TMP19]] = zext i16 [[TMP17]] to i64
+; X64-NEXT: [[TMP20]] = zext i16 [[TMP18]] to i64
+; X64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[TMP19]], [[TMP20]]
+; X64-NEXT: br i1 [[TMP21]], label [[LOADBB2:%.*]], label [[RES_BLOCK]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP22:%.*]] = getelementptr i8, i8* [[X]], i8 6
+; X64-NEXT: [[TMP23:%.*]] = getelementptr i8, i8* [[Y]], i8 6
+; X64-NEXT: [[TMP24:%.*]] = load i8, i8* [[TMP22]]
+; X64-NEXT: [[TMP25:%.*]] = load i8, i8* [[TMP23]]
+; X64-NEXT: [[TMP26:%.*]] = zext i8 [[TMP24]] to i32
+; X64-NEXT: [[TMP27:%.*]] = zext i8 [[TMP25]] to i32
+; X64-NEXT: [[TMP28:%.*]] = sub i32 [[TMP26]], [[TMP27]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP28]], [[LOADBB2]] ], [ [[TMP10]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
ret i32 %call
@@ -78,8 +330,35 @@ define i32 @cmp7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp8(
-; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 8)
-; X32-NEXT: ret i32 [[CALL]]
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
;
; X64-LABEL: @cmp8(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
@@ -99,72 +378,691 @@ define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp9(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp9(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2:%.*]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT: [[TMP20:%.*]] = load i8, i8* [[TMP18]]
+; X32-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP19]]
+; X32-NEXT: [[TMP22:%.*]] = zext i8 [[TMP20]] to i32
+; X32-NEXT: [[TMP23:%.*]] = zext i8 [[TMP21]] to i32
+; X32-NEXT: [[TMP24:%.*]] = sub i32 [[TMP22]], [[TMP23]]
+; X32-NEXT: br label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP24]], [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp9(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]]
+; X64-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]]
+; X64-NEXT: [[TMP13:%.*]] = zext i8 [[TMP11]] to i32
+; X64-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
+; X64-NEXT: [[TMP15:%.*]] = sub i32 [[TMP13]], [[TMP14]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
ret i32 %call
}
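; Note: in the X64 cmp9 expansion above, res_block compares [[TMP4]] and
; [[TMP5]] directly instead of going through PHIs, because only the first
; loadbb can reach it; the trailing 1-byte tail computes its own result with
; zext + sub and branches straight to endblock.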
define i32 @cmp10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp10(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp10(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ], [ [[TMP26:%.*]], [[LOADBB2:%.*]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP27:%.*]], [[LOADBB2]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i16, i16* [[TMP18]], i16 4
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i16, i16* [[TMP19]], i16 4
+; X32-NEXT: [[TMP22:%.*]] = load i16, i16* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i16, i16* [[TMP21]]
+; X32-NEXT: [[TMP24:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP22]])
+; X32-NEXT: [[TMP25:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP23]])
+; X32-NEXT: [[TMP26]] = zext i16 [[TMP24]] to i32
+; X32-NEXT: [[TMP27]] = zext i16 [[TMP25]] to i32
+; X32-NEXT: [[TMP28:%.*]] = icmp eq i32 [[TMP26]], [[TMP27]]
+; X32-NEXT: br i1 [[TMP28]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp10(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i16, i16* [[TMP9]], i16 4
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 4
+; X64-NEXT: [[TMP13:%.*]] = load i16, i16* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP13]])
+; X64-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
+; X64-NEXT: [[TMP17]] = zext i16 [[TMP15]] to i64
+; X64-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i64
+; X64-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]]
+; X64-NEXT: br i1 [[TMP19]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
ret i32 %call
}
define i32 @cmp11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp11(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp11(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ], [ [[TMP26:%.*]], [[LOADBB2:%.*]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP27:%.*]], [[LOADBB2]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i16, i16* [[TMP18]], i16 4
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i16, i16* [[TMP19]], i16 4
+; X32-NEXT: [[TMP22:%.*]] = load i16, i16* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i16, i16* [[TMP21]]
+; X32-NEXT: [[TMP24:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP22]])
+; X32-NEXT: [[TMP25:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP23]])
+; X32-NEXT: [[TMP26]] = zext i16 [[TMP24]] to i32
+; X32-NEXT: [[TMP27]] = zext i16 [[TMP25]] to i32
+; X32-NEXT: [[TMP28:%.*]] = icmp eq i32 [[TMP26]], [[TMP27]]
+; X32-NEXT: br i1 [[TMP28]], label [[LOADBB3:%.*]], label [[RES_BLOCK]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP29:%.*]] = getelementptr i8, i8* [[X]], i8 10
+; X32-NEXT: [[TMP30:%.*]] = getelementptr i8, i8* [[Y]], i8 10
+; X32-NEXT: [[TMP31:%.*]] = load i8, i8* [[TMP29]]
+; X32-NEXT: [[TMP32:%.*]] = load i8, i8* [[TMP30]]
+; X32-NEXT: [[TMP33:%.*]] = zext i8 [[TMP31]] to i32
+; X32-NEXT: [[TMP34:%.*]] = zext i8 [[TMP32]] to i32
+; X32-NEXT: [[TMP35:%.*]] = sub i32 [[TMP33]], [[TMP34]]
+; X32-NEXT: br label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP35]], [[LOADBB3]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp11(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i16, i16* [[TMP9]], i16 4
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 4
+; X64-NEXT: [[TMP13:%.*]] = load i16, i16* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP13]])
+; X64-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
+; X64-NEXT: [[TMP17]] = zext i16 [[TMP15]] to i64
+; X64-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i64
+; X64-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]]
+; X64-NEXT: br i1 [[TMP19]], label [[LOADBB2:%.*]], label [[RES_BLOCK]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 10
+; X64-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[Y]], i8 10
+; X64-NEXT: [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X64-NEXT: [[TMP23:%.*]] = load i8, i8* [[TMP21]]
+; X64-NEXT: [[TMP24:%.*]] = zext i8 [[TMP22]] to i32
+; X64-NEXT: [[TMP25:%.*]] = zext i8 [[TMP23]] to i32
+; X64-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], [[TMP25]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP26]], [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
ret i32 %call
}
define i32 @cmp12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp12(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp12(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ], [ [[TMP24:%.*]], [[LOADBB2:%.*]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP25:%.*]], [[LOADBB2]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[TMP18]], i32 2
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP19]], i32 2
+; X32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT: [[TMP24]] = call i32 @llvm.bswap.i32(i32 [[TMP22]])
+; X32-NEXT: [[TMP25]] = call i32 @llvm.bswap.i32(i32 [[TMP23]])
+; X32-NEXT: [[TMP26:%.*]] = icmp eq i32 [[TMP24]], [[TMP25]]
+; X32-NEXT: br i1 [[TMP26]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp12(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 2
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 2
+; X64-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X64-NEXT: [[TMP17]] = zext i32 [[TMP15]] to i64
+; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64
+; X64-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]]
+; X64-NEXT: br i1 [[TMP19]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
ret i32 %call
}
define i32 @cmp13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp13(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp13(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ], [ [[TMP24:%.*]], [[LOADBB2:%.*]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP25:%.*]], [[LOADBB2]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[TMP18]], i32 2
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP19]], i32 2
+; X32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT: [[TMP24]] = call i32 @llvm.bswap.i32(i32 [[TMP22]])
+; X32-NEXT: [[TMP25]] = call i32 @llvm.bswap.i32(i32 [[TMP23]])
+; X32-NEXT: [[TMP26:%.*]] = icmp eq i32 [[TMP24]], [[TMP25]]
+; X32-NEXT: br i1 [[TMP26]], label [[LOADBB3:%.*]], label [[RES_BLOCK]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP27:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X32-NEXT: [[TMP28:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X32-NEXT: [[TMP29:%.*]] = load i8, i8* [[TMP27]]
+; X32-NEXT: [[TMP30:%.*]] = load i8, i8* [[TMP28]]
+; X32-NEXT: [[TMP31:%.*]] = zext i8 [[TMP29]] to i32
+; X32-NEXT: [[TMP32:%.*]] = zext i8 [[TMP30]] to i32
+; X32-NEXT: [[TMP33:%.*]] = sub i32 [[TMP31]], [[TMP32]]
+; X32-NEXT: br label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP33]], [[LOADBB3]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp13(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 2
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 2
+; X64-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X64-NEXT: [[TMP17]] = zext i32 [[TMP15]] to i64
+; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64
+; X64-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]]
+; X64-NEXT: br i1 [[TMP19]], label [[LOADBB2:%.*]], label [[RES_BLOCK]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X64-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X64-NEXT: [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X64-NEXT: [[TMP23:%.*]] = load i8, i8* [[TMP21]]
+; X64-NEXT: [[TMP24:%.*]] = zext i8 [[TMP22]] to i32
+; X64-NEXT: [[TMP25:%.*]] = zext i8 [[TMP23]] to i32
+; X64-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], [[TMP25]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP26]], [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
ret i32 %call
}
define i32 @cmp14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp14(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp14(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ], [ [[TMP24:%.*]], [[LOADBB2:%.*]] ], [ [[TMP35:%.*]], [[LOADBB3:%.*]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP25:%.*]], [[LOADBB2]] ], [ [[TMP36:%.*]], [[LOADBB3]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[TMP18]], i32 2
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP19]], i32 2
+; X32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT: [[TMP24]] = call i32 @llvm.bswap.i32(i32 [[TMP22]])
+; X32-NEXT: [[TMP25]] = call i32 @llvm.bswap.i32(i32 [[TMP23]])
+; X32-NEXT: [[TMP26:%.*]] = icmp eq i32 [[TMP24]], [[TMP25]]
+; X32-NEXT: br i1 [[TMP26]], label [[LOADBB3]], label [[RES_BLOCK]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP27:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP28:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP29:%.*]] = getelementptr i16, i16* [[TMP27]], i16 6
+; X32-NEXT: [[TMP30:%.*]] = getelementptr i16, i16* [[TMP28]], i16 6
+; X32-NEXT: [[TMP31:%.*]] = load i16, i16* [[TMP29]]
+; X32-NEXT: [[TMP32:%.*]] = load i16, i16* [[TMP30]]
+; X32-NEXT: [[TMP33:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP31]])
+; X32-NEXT: [[TMP34:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP32]])
+; X32-NEXT: [[TMP35]] = zext i16 [[TMP33]] to i32
+; X32-NEXT: [[TMP36]] = zext i16 [[TMP34]] to i32
+; X32-NEXT: [[TMP37:%.*]] = icmp eq i32 [[TMP35]], [[TMP36]]
+; X32-NEXT: br i1 [[TMP37]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp14(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ], [ [[TMP28:%.*]], [[LOADBB2:%.*]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ], [ [[TMP29:%.*]], [[LOADBB2]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 2
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 2
+; X64-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X64-NEXT: [[TMP17]] = zext i32 [[TMP15]] to i64
+; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64
+; X64-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]]
+; X64-NEXT: br i1 [[TMP19]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP20:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP21:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP22:%.*]] = getelementptr i16, i16* [[TMP20]], i16 6
+; X64-NEXT: [[TMP23:%.*]] = getelementptr i16, i16* [[TMP21]], i16 6
+; X64-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP22]]
+; X64-NEXT: [[TMP25:%.*]] = load i16, i16* [[TMP23]]
+; X64-NEXT: [[TMP26:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP24]])
+; X64-NEXT: [[TMP27:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP25]])
+; X64-NEXT: [[TMP28]] = zext i16 [[TMP26]] to i64
+; X64-NEXT: [[TMP29]] = zext i16 [[TMP27]] to i64
+; X64-NEXT: [[TMP30:%.*]] = icmp eq i64 [[TMP28]], [[TMP29]]
+; X64-NEXT: br i1 [[TMP30]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
ret i32 %call
}
define i32 @cmp15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp15(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp15(
+; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
+; X32-NEXT: ret i32 [[CALL]]
+;
+; X64-LABEL: @cmp15(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ], [ [[TMP28:%.*]], [[LOADBB2:%.*]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ], [ [[TMP29:%.*]], [[LOADBB2]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 2
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 2
+; X64-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X64-NEXT: [[TMP17]] = zext i32 [[TMP15]] to i64
+; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64
+; X64-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]]
+; X64-NEXT: br i1 [[TMP19]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP20:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP21:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP22:%.*]] = getelementptr i16, i16* [[TMP20]], i16 6
+; X64-NEXT: [[TMP23:%.*]] = getelementptr i16, i16* [[TMP21]], i16 6
+; X64-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP22]]
+; X64-NEXT: [[TMP25:%.*]] = load i16, i16* [[TMP23]]
+; X64-NEXT: [[TMP26:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP24]])
+; X64-NEXT: [[TMP27:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP25]])
+; X64-NEXT: [[TMP28]] = zext i16 [[TMP26]] to i64
+; X64-NEXT: [[TMP29]] = zext i16 [[TMP27]] to i64
+; X64-NEXT: [[TMP30:%.*]] = icmp eq i64 [[TMP28]], [[TMP29]]
+; X64-NEXT: br i1 [[TMP30]], label [[LOADBB3:%.*]], label [[RES_BLOCK]]
+; X64: loadbb3:
+; X64-NEXT: [[TMP31:%.*]] = getelementptr i8, i8* [[X]], i8 14
+; X64-NEXT: [[TMP32:%.*]] = getelementptr i8, i8* [[Y]], i8 14
+; X64-NEXT: [[TMP33:%.*]] = load i8, i8* [[TMP31]]
+; X64-NEXT: [[TMP34:%.*]] = load i8, i8* [[TMP32]]
+; X64-NEXT: [[TMP35:%.*]] = zext i8 [[TMP33]] to i32
+; X64-NEXT: [[TMP36:%.*]] = zext i8 [[TMP34]] to i32
+; X64-NEXT: [[TMP37:%.*]] = sub i32 [[TMP35]], [[TMP36]]
+; X64-NEXT: br label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP37]], [[LOADBB3]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
ret i32 %call
}
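; cmp15 is the one asymmetric case: X32 keeps the libcall while X64 expands.
; Fifteen bytes would take five load pairs on X32 (4 + 4 + 4 + 2 + 1), one
; more than any expansion emitted in this file, which is consistent with a
; four-load cap on the 32-bit path; X64 fits in four (8 + 4 + 2 + 1).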
define i32 @cmp16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp16(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
-; ALL-NEXT: ret i32 [[CALL]]
+; X32-LABEL: @cmp16(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X32-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
+; X32-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X32: res_block:
+; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ], [ [[TMP24:%.*]], [[LOADBB2:%.*]] ], [ [[TMP33:%.*]], [[LOADBB3:%.*]] ]
+; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP25:%.*]], [[LOADBB2]] ], [ [[TMP34:%.*]], [[LOADBB3]] ]
+; X32-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X32-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP9]], i32 1
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
+; X32-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP11]]
+; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
+; X32-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X32-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; X32-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP18:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[TMP18]], i32 2
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP19]], i32 2
+; X32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT: [[TMP24]] = call i32 @llvm.bswap.i32(i32 [[TMP22]])
+; X32-NEXT: [[TMP25]] = call i32 @llvm.bswap.i32(i32 [[TMP23]])
+; X32-NEXT: [[TMP26:%.*]] = icmp eq i32 [[TMP24]], [[TMP25]]
+; X32-NEXT: br i1 [[TMP26]], label [[LOADBB3]], label [[RES_BLOCK]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP27:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP28:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP29:%.*]] = getelementptr i32, i32* [[TMP27]], i32 3
+; X32-NEXT: [[TMP30:%.*]] = getelementptr i32, i32* [[TMP28]], i32 3
+; X32-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP29]]
+; X32-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP30]]
+; X32-NEXT: [[TMP33]] = call i32 @llvm.bswap.i32(i32 [[TMP31]])
+; X32-NEXT: [[TMP34]] = call i32 @llvm.bswap.i32(i32 [[TMP32]])
+; X32-NEXT: [[TMP35:%.*]] = icmp eq i32 [[TMP33]], [[TMP34]]
+; X32-NEXT: br i1 [[TMP35]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X32-NEXT: ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @cmp16(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP4]], [[TMP5]]
+; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64: res_block:
+; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ]
+; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[X]] to i64*
+; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[Y]] to i64*
+; X64-NEXT: [[TMP11:%.*]] = getelementptr i64, i64* [[TMP9]], i64 1
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i64, i64* [[TMP10]], i64 1
+; X64-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP11]]
+; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]]
+; X64-NEXT: [[TMP15]] = call i64 @llvm.bswap.i64(i64 [[TMP13]])
+; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
+; X64-NEXT: [[TMP17:%.*]] = icmp eq i64 [[TMP15]], [[TMP16]]
+; X64-NEXT: br i1 [[TMP17]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT: ret i32 [[PHI_RES]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
ret i32 %call
@@ -190,8 +1088,25 @@ define i32 @cmp_eq2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq3(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 3)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; ALL-NEXT: loadbb:
+; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP0]]
+; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
+; ALL-NEXT: [[TMP4:%.*]] = icmp ne i16 [[TMP2]], [[TMP3]]
+; ALL-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; ALL: res_block:
+; ALL-NEXT: br label [[ENDBLOCK:%.*]]
+; ALL: loadbb1:
+; ALL-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i8 2
+; ALL-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[Y]], i8 2
+; ALL-NEXT: [[TMP7:%.*]] = load i8, i8* [[TMP5]]
+; ALL-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
+; ALL-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP7]], [[TMP8]]
+; ALL-NEXT: br i1 [[TMP9]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; ALL: endblock:
+; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
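; From cmp_eq3 onward the pattern simplifies: equality-only comparisons skip
; the bswap calls (byte order is irrelevant for ==), res_block needs no PHIs
; and just feeds a constant 1 into endblock, and the final icmp eq / zext
; pair folds the 0-or-1 phi back into the boolean return value.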
@@ -221,8 +1136,25 @@ define i32 @cmp_eq4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq5(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 5)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; ALL-NEXT: loadbb:
+; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; ALL-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; ALL-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; ALL: res_block:
+; ALL-NEXT: br label [[ENDBLOCK:%.*]]
+; ALL: loadbb1:
+; ALL-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; ALL-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; ALL-NEXT: [[TMP7:%.*]] = load i8, i8* [[TMP5]]
+; ALL-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
+; ALL-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP7]], [[TMP8]]
+; ALL-NEXT: br i1 [[TMP9]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; ALL: endblock:
+; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
@@ -234,8 +1166,27 @@ define i32 @cmp_eq5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq6(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 6)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; ALL-NEXT: loadbb:
+; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; ALL-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; ALL-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; ALL: res_block:
+; ALL-NEXT: br label [[ENDBLOCK:%.*]]
+; ALL: loadbb1:
+; ALL-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i16*
+; ALL-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i16*
+; ALL-NEXT: [[TMP7:%.*]] = getelementptr i16, i16* [[TMP5]], i16 2
+; ALL-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
+; ALL-NEXT: [[TMP9:%.*]] = load i16, i16* [[TMP7]]
+; ALL-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; ALL-NEXT: [[TMP11:%.*]] = icmp ne i16 [[TMP9]], [[TMP10]]
+; ALL-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; ALL: endblock:
+; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
@@ -247,8 +1198,34 @@ define i32 @cmp_eq6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq7(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; ALL-NEXT: loadbb:
+; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; ALL-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; ALL-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; ALL: res_block:
+; ALL-NEXT: br label [[ENDBLOCK:%.*]]
+; ALL: loadbb1:
+; ALL-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i16*
+; ALL-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i16*
+; ALL-NEXT: [[TMP7:%.*]] = getelementptr i16, i16* [[TMP5]], i16 2
+; ALL-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
+; ALL-NEXT: [[TMP9:%.*]] = load i16, i16* [[TMP7]]
+; ALL-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; ALL-NEXT: [[TMP11:%.*]] = icmp ne i16 [[TMP9]], [[TMP10]]
+; ALL-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; ALL: loadbb2:
+; ALL-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[X]], i8 6
+; ALL-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[Y]], i8 6
+; ALL-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; ALL-NEXT: [[TMP15:%.*]] = load i8, i8* [[TMP13]]
+; ALL-NEXT: [[TMP16:%.*]] = icmp ne i8 [[TMP14]], [[TMP15]]
+; ALL-NEXT: br i1 [[TMP16]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; ALL: endblock:
+; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
@@ -260,8 +1237,27 @@ define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
define i32 @cmp_eq8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq8(
-; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 8)
-; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
@@ -283,11 +1279,60 @@ define i32 @cmp_eq8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq9(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq9(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; X32-NEXT: [[TMP15:%.*]] = load i8, i8* [[TMP13]]
+; X32-NEXT: [[TMP16:%.*]] = icmp ne i8 [[TMP14]], [[TMP15]]
+; X32-NEXT: br i1 [[TMP16]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq9(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X64-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X64-NEXT: [[TMP7:%.*]] = load i8, i8* [[TMP5]]
+; X64-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
+; X64-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP7]], [[TMP8]]
+; X64-NEXT: br i1 [[TMP9]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
%cmp = icmp eq i32 %call, 0
@@ -296,11 +1341,64 @@ define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq10(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq10(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP14:%.*]] = getelementptr i16, i16* [[TMP12]], i16 4
+; X32-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP13]], i16 4
+; X32-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP14]]
+; X32-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]]
+; X32-NEXT: [[TMP18:%.*]] = icmp ne i16 [[TMP16]], [[TMP17]]
+; X32-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq10(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i16, i16* [[TMP5]], i16 4
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 4
+; X64-NEXT: [[TMP9:%.*]] = load i16, i16* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i16 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
%cmp = icmp eq i32 %call, 0
@@ -309,11 +1407,78 @@ define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq11(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq11(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP14:%.*]] = getelementptr i16, i16* [[TMP12]], i16 4
+; X32-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP13]], i16 4
+; X32-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP14]]
+; X32-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]]
+; X32-NEXT: [[TMP18:%.*]] = icmp ne i16 [[TMP16]], [[TMP17]]
+; X32-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[LOADBB3:%.*]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[X]], i8 10
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[Y]], i8 10
+; X32-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP19]]
+; X32-NEXT: [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = icmp ne i8 [[TMP21]], [[TMP22]]
+; X32-NEXT: br i1 [[TMP23]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq11(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i16, i16* [[TMP5]], i16 4
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 4
+; X64-NEXT: [[TMP9:%.*]] = load i16, i16* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i16 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[X]], i8 10
+; X64-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[Y]], i8 10
+; X64-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = load i8, i8* [[TMP13]]
+; X64-NEXT: [[TMP16:%.*]] = icmp ne i8 [[TMP14]], [[TMP15]]
+; X64-NEXT: br i1 [[TMP16]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
%cmp = icmp eq i32 %call, 0
@@ -322,11 +1487,64 @@ define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq12(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq12(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[TMP12]], i32 2
+; X32-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP13]], i32 2
+; X32-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP15]]
+; X32-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP16]], [[TMP17]]
+; X32-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq12(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 2
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
+; X64-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
%cmp = icmp eq i32 %call, 0
@@ -335,11 +1553,78 @@ define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq13(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq13(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[TMP12]], i32 2
+; X32-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP13]], i32 2
+; X32-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP15]]
+; X32-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP16]], [[TMP17]]
+; X32-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[LOADBB3:%.*]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X32-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X32-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP19]]
+; X32-NEXT: [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X32-NEXT: [[TMP23:%.*]] = icmp ne i8 [[TMP21]], [[TMP22]]
+; X32-NEXT: br i1 [[TMP23]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq13(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 2
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
+; X64-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X64-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X64-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]]
+; X64-NEXT: [[TMP15:%.*]] = load i8, i8* [[TMP13]]
+; X64-NEXT: [[TMP16:%.*]] = icmp ne i8 [[TMP14]], [[TMP15]]
+; X64-NEXT: br i1 [[TMP16]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
%cmp = icmp eq i32 %call, 0
@@ -348,11 +1633,82 @@ define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq14(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq14(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[TMP12]], i32 2
+; X32-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP13]], i32 2
+; X32-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP15]]
+; X32-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP16]], [[TMP17]]
+; X32-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[LOADBB3:%.*]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[X]] to i16*
+; X32-NEXT: [[TMP20:%.*]] = bitcast i8* [[Y]] to i16*
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i16, i16* [[TMP19]], i16 6
+; X32-NEXT: [[TMP22:%.*]] = getelementptr i16, i16* [[TMP20]], i16 6
+; X32-NEXT: [[TMP23:%.*]] = load i16, i16* [[TMP21]]
+; X32-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP22]]
+; X32-NEXT: [[TMP25:%.*]] = icmp ne i16 [[TMP23]], [[TMP24]]
+; X32-NEXT: br i1 [[TMP25]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq14(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 2
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
+; X64-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP14:%.*]] = getelementptr i16, i16* [[TMP12]], i16 6
+; X64-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP13]], i16 6
+; X64-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP14]]
+; X64-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]]
+; X64-NEXT: [[TMP18:%.*]] = icmp ne i16 [[TMP16]], [[TMP17]]
+; X64-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
%cmp = icmp eq i32 %call, 0
@@ -361,11 +1717,52 @@ define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq15(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq15(
+; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq15(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 2
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
+; X64-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X64: loadbb2:
+; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i16*
+; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i16*
+; X64-NEXT: [[TMP14:%.*]] = getelementptr i16, i16* [[TMP12]], i16 6
+; X64-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP13]], i16 6
+; X64-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP14]]
+; X64-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]]
+; X64-NEXT: [[TMP18:%.*]] = icmp ne i16 [[TMP16]], [[TMP17]]
+; X64-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[LOADBB3:%.*]]
+; X64: loadbb3:
+; X64-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[X]], i8 14
+; X64-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[Y]], i8 14
+; X64-NEXT: [[TMP21:%.*]] = load i8, i8* [[TMP19]]
+; X64-NEXT: [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X64-NEXT: [[TMP23:%.*]] = icmp ne i8 [[TMP21]], [[TMP22]]
+; X64-NEXT: br i1 [[TMP23]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
%cmp = icmp eq i32 %call, 0
@@ -374,11 +1771,73 @@ define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
}
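+; Note: on X32 the 15-byte case above is left as a libcall, presumably
+; because it would need five loads (4+4+4+2+1) and exceed the expansion's
+; per-call load budget, while X64 covers it with four (8+4+2+1).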
define i32 @cmp_eq16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; ALL-LABEL: @cmp_eq16(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
-; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; ALL-NEXT: ret i32 [[CONV]]
+; X32-LABEL: @cmp_eq16(
+; X32-NEXT: loadbb:
+; X32-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]]
+; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[TMP3]]
+; X32-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X32: res_block:
+; X32-NEXT: br label [[ENDBLOCK:%.*]]
+; X32: loadbb1:
+; X32-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
+; X32-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
+; X32-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; X32-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[LOADBB2:%.*]]
+; X32: loadbb2:
+; X32-NEXT: [[TMP12:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[TMP12]], i32 2
+; X32-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP13]], i32 2
+; X32-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP15]]
+; X32-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP16]], [[TMP17]]
+; X32-NEXT: br i1 [[TMP18]], label [[RES_BLOCK]], label [[LOADBB3:%.*]]
+; X32: loadbb3:
+; X32-NEXT: [[TMP19:%.*]] = bitcast i8* [[X]] to i32*
+; X32-NEXT: [[TMP20:%.*]] = bitcast i8* [[Y]] to i32*
+; X32-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP19]], i32 3
+; X32-NEXT: [[TMP22:%.*]] = getelementptr i32, i32* [[TMP20]], i32 3
+; X32-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP22]]
+; X32-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP23]], [[TMP24]]
+; X32-NEXT: br i1 [[TMP25]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X32: endblock:
+; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB3]] ], [ 1, [[RES_BLOCK]] ]
+; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X32-NEXT: ret i32 [[CONV]]
+;
+; X64-LABEL: @cmp_eq16(
+; X64-NEXT: loadbb:
+; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]]
+; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
+; X64-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
+; X64-NEXT: br i1 [[TMP4]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
+; X64: res_block:
+; X64-NEXT: br label [[ENDBLOCK:%.*]]
+; X64: loadbb1:
+; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[X]] to i64*
+; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[Y]] to i64*
+; X64-NEXT: [[TMP7:%.*]] = getelementptr i64, i64* [[TMP5]], i64 1
+; X64-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[TMP6]], i64 1
+; X64-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP7]]
+; X64-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]]
+; X64-NEXT: [[TMP11:%.*]] = icmp ne i64 [[TMP9]], [[TMP10]]
+; X64-NEXT: br i1 [[TMP11]], label [[RES_BLOCK]], label [[ENDBLOCK]]
+; X64: endblock:
+; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
+; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
+; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; X64-NEXT: ret i32 [[CONV]]
;
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
%cmp = icmp eq i32 %call, 0
diff --git a/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll b/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
index b6b7757978263..088b177c2e11a 100644
--- a/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
+++ b/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
@@ -194,7 +194,6 @@ rare.2:
br label %fallthrough
}
-
declare void @slowpath(i32, i32*)
; Make sure we don't end up in an infinite loop after we fail to sink.
@@ -218,3 +217,37 @@ load.i145:
pl_loop.i.i122:
br label %pl_loop.i.i122
}
+
+; Make sure we can sink the address computation even if there is a cycle in
+; the phi nodes (see the sketch after this test).
+define void @test9(i1 %cond, i64* %base) {
+; CHECK-LABEL: @test9
+entry:
+ %addr = getelementptr inbounds i64, i64* %base, i64 5
+ %casted = bitcast i64* %addr to i32*
+ br label %header
+
+header:
+ %iv = phi i32 [0, %entry], [%iv.inc, %backedge]
+ %casted.loop = phi i32* [%casted, %entry], [%casted.merged, %backedge]
+ br i1 %cond, label %if.then, label %backedge
+
+if.then:
+ call void @foo(i32 %iv)
+ %addr.1 = getelementptr inbounds i64, i64* %base, i64 5
+ %casted.1 = bitcast i64* %addr.1 to i32*
+ br label %backedge
+
+backedge:
+; CHECK-LABEL: backedge:
+; CHECK: getelementptr i8, {{.+}} 40
+ %casted.merged = phi i32* [%casted.loop, %header], [%casted.1, %if.then]
+ %v = load i32, i32* %casted.merged, align 4
+ call void @foo(i32 %v)
+ %iv.inc = add i32 %iv, 1
+ %cmp = icmp slt i32 %iv.inc, 1000
+ br i1 %cmp, label %header, label %exit
+
+exit:
+ ret void
+}
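+
+; A rough sketch of the sunk form checked above (names are illustrative, not
+; part of the test): the address computation is rematerialized next to the
+; load as a flat byte offset, e.g.
+;
+;   %sunkaddr = getelementptr i8, i8* %base.cast, i64 40 ; 5 * sizeof(i64)
+;   %addr32   = bitcast i8* %sunkaddr to i32*
+;   %v        = load i32, i32* %addr32, align 4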
diff --git a/test/Transforms/EarlyCSE/globalsaa-memoryssa.ll b/test/Transforms/EarlyCSE/globalsaa-memoryssa.ll
new file mode 100644
index 0000000000000..57dbdd8831902
--- /dev/null
+++ b/test/Transforms/EarlyCSE/globalsaa-memoryssa.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -globals-aa -early-cse-memssa | FileCheck %s
+
+define i16 @f1() readonly {
+ ret i16 0
+}
+
+declare void @f2()
+
+; Check that EarlyCSE correctly handles function calls that don't have
+; a MemoryAccess. In this case the calls to @f1 have no
+; MemoryAccess, since globals-aa determines that @f1 doesn't
+; read or write memory at all (see the note after the test).
+
+define void @f3() {
+; CHECK-LABEL: @f3(
+; CHECK-NEXT: [[CALL1:%.*]] = call i16 @f1()
+; CHECK-NEXT: call void @f2()
+; CHECK-NEXT: ret void
+;
+ %call1 = call i16 @f1()
+ call void @f2()
+ %call2 = call i16 @f1()
+ ret void
+}
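+
+; Note that %call2 is absent from the CHECK lines above: the second call to
+; @f1 is expected to be CSE'd with %call1 even though neither call has a
+; MemoryAccess for MemorySSA to compare through.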
diff --git a/test/Transforms/GVN/PRE/2017-06-28-pre-load-dbgloc.ll b/test/Transforms/GVN/PRE/2017-06-28-pre-load-dbgloc.ll
new file mode 100644
index 0000000000000..513379d0bd017
--- /dev/null
+++ b/test/Transforms/GVN/PRE/2017-06-28-pre-load-dbgloc.ll
@@ -0,0 +1,79 @@
+; This test checks that the debug location is propagated to the load/store created by GVN/InstCombine.
+; RUN: opt < %s -gvn -S | FileCheck %s --check-prefixes=ALL,GVN
+; RUN: opt < %s -gvn -instcombine -S | FileCheck %s --check-prefixes=ALL,INSTCOMBINE
+
+; struct node {
+; int *v;
+; struct desc *descs;
+; };
+
+; struct desc {
+; struct node *node;
+; };
+
+; extern int bar(void *v, void *n);
+
+; int test(struct desc *desc)
+; {
+; void *v, *n;
+; v = !desc ? ((void *)0) : desc->node->v; // Line 15
+; n = &desc->node->descs[0]; // Line 16
+; return bar(v, n);
+; }
+
+; Line 16, Column 13:
+; n = &desc->node->descs[0];
+; ^
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+%struct.desc = type { %struct.node* }
+%struct.node = type { i32*, %struct.desc* }
+
+define i32 @test(%struct.desc* readonly %desc) local_unnamed_addr #0 !dbg !4 {
+entry:
+ %tobool = icmp eq %struct.desc* %desc, null
+ br i1 %tobool, label %cond.end, label %cond.false, !dbg !9
+; ALL: br i1 %tobool, label %entry.cond.end_crit_edge, label %cond.false, !dbg [[LOC_15_6:![0-9]+]]
+; ALL: entry.cond.end_crit_edge:
+; GVN: %.pre = load %struct.node*, %struct.node** null, align 8, !dbg [[LOC_16_13:![0-9]+]]
+; INSTCOMBINE: store %struct.node* undef, %struct.node** null, align 536870912, !dbg [[LOC_16_13:![0-9]+]]
+
+cond.false:
+ %0 = bitcast %struct.desc* %desc to i8***, !dbg !11
+ %1 = load i8**, i8*** %0, align 8, !dbg !11
+ %2 = load i8*, i8** %1, align 8
+ br label %cond.end, !dbg !9
+
+cond.end:
+ %3 = phi i8* [ %2, %cond.false ], [ null, %entry ], !dbg !9
+ %node2 = getelementptr inbounds %struct.desc, %struct.desc* %desc, i64 0, i32 0
+ %4 = load %struct.node*, %struct.node** %node2, align 8, !dbg !10
+ %descs = getelementptr inbounds %struct.node, %struct.node* %4, i64 0, i32 1
+ %5 = bitcast %struct.desc** %descs to i8**
+ %6 = load i8*, i8** %5, align 8
+ %call = tail call i32 @bar(i8* %3, i8* %6)
+ ret i32 %call
+}
+
+declare i32 @bar(i8*, i8*) local_unnamed_addr #1
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, emissionKind: FullDebug)
+!1 = !DIFile(filename: "test.c", directory: ".")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 12, type: !5, isLocal: false, isDefinition: true, scopeLine: 13, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !8)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7}
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{}
+!9 = !DILocation(line: 15, column: 6, scope: !4)
+!10 = !DILocation(line: 16, column: 13, scope: !4)
+!11 = !DILocation(line: 15, column: 34, scope: !4)
+
+; ALL: [[SCOPE:![0-9]+]] = distinct !DISubprogram(name: "test",{{.*}}
+; ALL: [[LOC_15_6]] = !DILocation(line: 15, column: 6, scope: [[SCOPE]])
+; ALL: [[LOC_16_13]] = !DILocation(line: 16, column: 13, scope: [[SCOPE]])
diff --git a/test/Transforms/GVN/PRE/phi-translate.ll b/test/Transforms/GVN/PRE/phi-translate.ll
index 1f6c7c8d33ea7..55f5fd6465b68 100644
--- a/test/Transforms/GVN/PRE/phi-translate.ll
+++ b/test/Transforms/GVN/PRE/phi-translate.ll
@@ -6,12 +6,12 @@ target datalayout = "e-p:64:64:64"
; CHECK: entry.end_crit_edge:
; CHECK: %[[INDEX:[a-z0-9.]+]] = sext i32 %x to i64{{$}}
; CHECK: %[[ADDRESS:[a-z0-9.]+]] = getelementptr [100 x i32], [100 x i32]* @G, i64 0, i64 %[[INDEX]]{{$}}
-; CHECK: %n.pre = load i32, i32* %[[ADDRESS]]{{$}}
+; CHECK: %n.pre = load i32, i32* %[[ADDRESS]], !dbg [[N_LOC:![0-9]+]]
; CHECK: br label %end
; CHECK: then:
; CHECK: store i32 %z
; CHECK: end:
-; CHECK: %n = phi i32 [ %n.pre, %entry.end_crit_edge ], [ %z, %then ], !dbg [[N_LOC:![0-9]+]]
+; CHECK: %n = phi i32 [ %n.pre, %entry.end_crit_edge ], [ %z, %then ], !dbg [[N_LOC]]
; CHECK: ret i32 %n
; CHECK: [[N_LOC]] = !DILocation(line: 47, column: 1, scope: !{{.*}})
diff --git a/test/Transforms/GlobalOpt/pr33686.ll b/test/Transforms/GlobalOpt/pr33686.ll
new file mode 100644
index 0000000000000..d6bb98735f4e8
--- /dev/null
+++ b/test/Transforms/GlobalOpt/pr33686.ll
@@ -0,0 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -globalopt %s | FileCheck %s
+
+@glob = external global i16, align 1
+
+define void @beth() {
+; CHECK-LABEL: @beth(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret void
+;
+entry:
+ ret void
+
+notreachable:
+ %patatino = select i1 undef, i16* @glob, i16* %patatino
+ br label %notreachable
+}
diff --git a/test/Transforms/IRCE/eq_ne.ll b/test/Transforms/IRCE/eq_ne.ll
new file mode 100644
index 0000000000000..1b1ffe6b94ba7
--- /dev/null
+++ b/test/Transforms/IRCE/eq_ne.ll
@@ -0,0 +1,257 @@
+; RUN: opt -verify-loop-info -irce-print-changed-loops -irce -S < %s 2>&1 | FileCheck %s
+
+; CHECK: irce: in function test_01: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK-NOT: irce: in function test_02: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK: irce: in function test_03: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK-NOT: irce: in function test_04: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK: irce: in function test_05: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK-NOT: irce: in function test_06: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK: irce: in function test_07: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK-NOT: irce: in function test_08: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+
+; Show that IRCE can turn an 'ne' latch condition into 'slt' for an increasing IV.
+define void @test_01(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_01
+; CHECK: main.exit.selector:
+; CHECK-NEXT: [[PSEUDO_PHI:%[^ ]+]] = phi i32 [ %idx.next, %in.bounds ]
+; CHECK-NEXT: [[COND:%[^ ]+]] = icmp slt i32 [[PSEUDO_PHI]], 100
+; CHECK-NEXT: br i1 [[COND]]
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, 1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp ne i32 %idx.next, 100
+ br i1 %next, label %loop, label %exit
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE does not apply if the exit value is not known to be greater
+; than the starting value.
+define void @test_02(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_02(
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, 1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp ne i32 %idx.next, -100
+ br i1 %next, label %loop, label %exit
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE can turn an 'eq' exit condition into 'sge' for an increasing IV.
+define void @test_03(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_03(
+; CHECK: main.exit.selector:
+; CHECK-NEXT: [[PSEUDO_PHI:%[^ ]+]] = phi i32 [ %idx.next, %in.bounds ]
+; CHECK-NEXT: [[COND:%[^ ]+]] = icmp slt i32 [[PSEUDO_PHI]], 100
+; CHECK-NEXT: br i1 [[COND]]
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, 1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp eq i32 %idx.next, 100
+ br i1 %next, label %exit, label %loop
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE does not apply if the exit value is not known to be greater
+; than the starting value.
+define void @test_04(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_04(
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, 1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp eq i32 %idx.next, -100
+ br i1 %next, label %exit, label %loop
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE can turn an 'ne' latch condition into 'sgt' for a decreasing IV.
+define void @test_05(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_05(
+; CHECK: preloop.exit.selector:
+; CHECK-NEXT: [[PSEUDO_PHI:%[^ ]+]] = phi i32 [ %idx.next.preloop, %in.bounds.preloop ]
+; CHECK-NEXT: [[COND:%[^ ]+]] = icmp sgt i32 [[PSEUDO_PHI]], 0
+; CHECK-NEXT: br i1 [[COND]]
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 100, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, -1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp ne i32 %idx.next, 0
+ br i1 %next, label %loop, label %exit
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE cannot turn an 'ne' condition into 'sgt' for a decreasing IV
+; if the end value is not proven to be less than the start value.
+define void @test_06(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_06(
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 100, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, -1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp ne i32 %idx.next, 120
+ br i1 %next, label %loop, label %exit
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE can turn an 'eq' exit condition into 'slt' for a decreasing IV.
+define void @test_07(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_07(
+; CHECK: preloop.exit.selector:
+; CHECK-NEXT: [[PSEUDO_PHI:%[^ ]+]] = phi i32 [ %idx.next.preloop, %in.bounds.preloop ]
+; CHECK-NEXT: [[COND:%[^ ]+]] = icmp sgt i32 [[PSEUDO_PHI]], 0
+; CHECK-NEXT: br i1 [[COND]]
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 100, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, -1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp eq i32 %idx.next, 0
+ br i1 %next, label %exit, label %loop
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Show that IRCE cannot turn an 'eq' condition into 'slt' for a decreasing IV
+; if the end value is not proven to be less than the start value.
+define void @test_08(i32* %arr, i32* %a_len_ptr) #0 {
+
+; CHECK: test_08(
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 100, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, -1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp eq i32 %idx.next, 120
+ br i1 %next, label %exit, label %loop
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+!0 = !{i32 0, i32 50}
+!1 = !{!"branch_weights", i32 64, i32 4}
diff --git a/test/Transforms/IRCE/pre_post_loops.ll b/test/Transforms/IRCE/pre_post_loops.ll
new file mode 100644
index 0000000000000..2cd2e29104fe9
--- /dev/null
+++ b/test/Transforms/IRCE/pre_post_loops.ll
@@ -0,0 +1,117 @@
+; RUN: opt -verify-loop-info -irce-print-changed-loops -irce -S < %s 2>&1 | FileCheck %s
+
+; CHECK: irce: in function test_01: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+; CHECK: irce: in function test_02: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
+
+; Iterate from 0 to SINT_MAX and check that the post-loop is generated.
+define void @test_01(i32* %arr, i32* %a_len_ptr) {
+
+; CHECK: test_01(
+; CHECK: entry:
+; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr
+; CHECK: loop:
+; CHECK-NEXT: %idx = phi i32 [ %idx.next, %in.bounds ], [ 0, %loop.preheader ]
+; CHECK-NEXT: %idx.next = add i32 %idx, 1
+; CHECK-NEXT: %abc = icmp slt i32 %idx, %exit.mainloop.at
+; CHECK-NEXT: br i1 true, label %in.bounds,
+; CHECK: in.bounds:
+; CHECK-NEXT: %addr = getelementptr i32, i32* %arr, i32 %idx
+; CHECK-NEXT: store i32 0, i32* %addr
+; CHECK-NEXT: %next = icmp slt i32 %idx.next, 2147483647
+; CHECK-NEXT: [[COND:%[^ ]+]] = icmp slt i32 %idx.next, %exit.mainloop.at
+; CHECK-NEXT: br i1 [[COND]], label %loop, label %main.exit.selector
+; CHECK: main.pseudo.exit:
+; CHECK-NEXT: %idx.copy = phi i32 [ 0, %entry ], [ %idx.next.lcssa, %main.exit.selector ]
+; CHECK-NEXT: %indvar.end = phi i32 [ 0, %entry ], [ %idx.next.lcssa, %main.exit.selector ]
+; CHECK-NEXT: br label %postloop
+; CHECK: postloop:
+; CHECK-NEXT: br label %loop.postloop
+; CHECK: loop.postloop:
+; CHECK-NEXT: %idx.postloop = phi i32 [ %idx.copy, %postloop ], [ %idx.next.postloop, %in.bounds.postloop ]
+; CHECK-NEXT: %idx.next.postloop = add i32 %idx.postloop, 1
+; CHECK-NEXT: %abc.postloop = icmp slt i32 %idx.postloop, %exit.mainloop.at
+; CHECK-NEXT: br i1 %abc.postloop, label %in.bounds.postloop, label %out.of.bounds.loopexit
+; CHECK: in.bounds.postloop:
+; CHECK-NEXT: %addr.postloop = getelementptr i32, i32* %arr, i32 %idx.postloop
+; CHECK-NEXT: store i32 0, i32* %addr.postloop
+; CHECK-NEXT: %next.postloop = icmp slt i32 %idx.next.postloop, 2147483647
+; CHECK-NEXT: br i1 %next.postloop, label %loop.postloop, label %exit.loopexit
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 0, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, 1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp slt i32 %idx.next, 2147483647
+ br i1 %next, label %loop, label %exit
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+; Iterate from SINT_MAX to 0 and check that the pre-loop is generated.
+define void @test_02(i32* %arr, i32* %a_len_ptr) {
+
+; CHECK: test_02(
+; CHECK: entry:
+; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
+; CHECK-NEXT:  br i1 true, label %loop.preloop.preheader
+; CHECK: mainloop:
+; CHECK-NEXT: br label %loop
+; CHECK: loop:
+; CHECK-NEXT: %idx = phi i32 [ %idx.preloop.copy, %mainloop ], [ %idx.next, %in.bounds ]
+; CHECK-NEXT: %idx.next = add i32 %idx, -1
+; CHECK-NEXT: %abc = icmp slt i32 %idx, %len
+; CHECK-NEXT: br i1 true, label %in.bounds
+; CHECK: in.bounds:
+; CHECK-NEXT: %addr = getelementptr i32, i32* %arr, i32 %idx
+; CHECK-NEXT: store i32 0, i32* %addr
+; CHECK-NEXT: %next = icmp sgt i32 %idx.next, -1
+; CHECK-NEXT: br i1 %next, label %loop, label %exit.loopexit
+; CHECK: loop.preloop:
+; CHECK-NEXT: %idx.preloop = phi i32 [ %idx.next.preloop, %in.bounds.preloop ], [ 2147483647, %loop.preloop.preheader ]
+; CHECK-NEXT: %idx.next.preloop = add i32 %idx.preloop, -1
+; CHECK-NEXT: %abc.preloop = icmp slt i32 %idx.preloop, %len
+; CHECK-NEXT: br i1 %abc.preloop, label %in.bounds.preloop, label %out.of.bounds.loopexit
+; CHECK: in.bounds.preloop:
+; CHECK-NEXT: %addr.preloop = getelementptr i32, i32* %arr, i32 %idx.preloop
+; CHECK-NEXT: store i32 0, i32* %addr.preloop
+; CHECK-NEXT: %next.preloop = icmp sgt i32 %idx.next.preloop, -1
+; CHECK-NEXT: [[COND:%[^ ]+]] = icmp sgt i32 %idx.next.preloop, -1
+; CHECK-NEXT: br i1 [[COND]], label %loop.preloop, label %preloop.exit.selector
+
+entry:
+ %len = load i32, i32* %a_len_ptr, !range !0
+ br label %loop
+
+loop:
+ %idx = phi i32 [ 2147483647, %entry ], [ %idx.next, %in.bounds ]
+ %idx.next = add i32 %idx, -1
+ %abc = icmp slt i32 %idx, %len
+ br i1 %abc, label %in.bounds, label %out.of.bounds
+
+in.bounds:
+ %addr = getelementptr i32, i32* %arr, i32 %idx
+ store i32 0, i32* %addr
+ %next = icmp sgt i32 %idx.next, -1
+ br i1 %next, label %loop, label %exit
+
+out.of.bounds:
+ ret void
+
+exit:
+ ret void
+}
+
+!0 = !{i32 0, i32 50}
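+
+; Note how in both tests the range check inside the constrained loop folds to
+; 'br i1 true', and where an exit test had to be tightened (the main loop in
+; test_01, the preloop in test_02) the original latch compare survives as a
+; dead icmp next to the newly inserted one.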
diff --git a/test/Transforms/Inline/AArch64/ext.ll b/test/Transforms/Inline/AArch64/ext.ll
new file mode 100644
index 0000000000000..04095c04ee869
--- /dev/null
+++ b/test/Transforms/Inline/AArch64/ext.ll
@@ -0,0 +1,249 @@
+; REQUIRES: asserts
+; RUN: opt -inline -mtriple=aarch64--linux-gnu -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+define i32 @outer1(i32* %ptr, i32 %i) {
+ %C = call i32 @inner1(i32* %ptr, i32 %i)
+ ret i32 %C
+}
+
+; sext can be folded into gep.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner1(i32* %ptr, i32 %i) {
+ %E = sext i32 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i32 @outer2(i32* %ptr, i32 %i) {
+ %C = call i32 @inner2(i32* %ptr, i32 %i)
+ ret i32 %C
+}
+
+; zext from i32 to i64 is free.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner2(i32* %ptr, i32 %i) {
+ %E = zext i32 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i32 @outer3(i32* %ptr, i16 %i) {
+ %C = call i32 @inner3(i32* %ptr, i16 %i)
+ ret i32 %C
+}
+
+; zext can be folded into gep.
+; CHECK: Analyzing call of inner3
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner3(i32* %ptr, i16 %i) {
+ %E = zext i16 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i16 @outer4(i8* %ptr) {
+ %C = call i16 @inner4(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner4
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner4(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i16
+ ret i16 %E
+}
+
+define i16 @outer5(i8* %ptr) {
+ %C = call i16 @inner5(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner5
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner5(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i16
+ ret i16 %E
+}
+
+define i32 @outer6(i8* %ptr) {
+ %C = call i32 @inner6(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner6
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner6(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer7(i8* %ptr) {
+ %C = call i32 @inner7(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner7
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner7(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer8(i16* %ptr) {
+ %C = call i32 @inner8(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner8
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner8(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer9(i16* %ptr) {
+ %C = call i32 @inner9(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner9
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner9(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i32
+ ret i32 %E
+}
+
+define i64 @outer10(i8* %ptr) {
+ %C = call i64 @inner10(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner10
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner10(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer11(i8* %ptr) {
+ %C = call i64 @inner11(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner11
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner11(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer12(i16* %ptr) {
+ %C = call i64 @inner12(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner12
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner12(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer13(i16* %ptr) {
+ %C = call i64 @inner13(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner13
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner13(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer14(i32* %ptr) {
+ %C = call i64 @inner14(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner14
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner14(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = zext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer15(i32* %ptr) {
+ %C = call i64 @inner15(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner15
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner15(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = sext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer16(i32 %V1, i64 %V2) {
+ %C = call i64 @inner16(i32 %V1, i64 %V2)
+ ret i64 %C
+}
+
+; sext can be folded into shl.
+; CHECK: Analyzing call of inner16
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 4
+define i64 @inner16(i32 %V1, i64 %V2) {
+ %E = sext i32 %V1 to i64
+ %S = shl i64 %E, 3
+ %A = add i64 %V2, %S
+ ret i64 %A
+}
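+
+; In each pair above, the ext is modeled as free on AArch64 (folded into the
+; gep's addressing mode, into an extending load, or into the shl), so the
+; inline-cost analysis counts it under NumInstructionsSimplified instead of
+; charging for it.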
diff --git a/test/Transforms/Inline/PowerPC/ext.ll b/test/Transforms/Inline/PowerPC/ext.ll
new file mode 100644
index 0000000000000..f7a409467b2c0
--- /dev/null
+++ b/test/Transforms/Inline/PowerPC/ext.ll
@@ -0,0 +1,140 @@
+; REQUIRES: asserts
+; RUN: opt -inline -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64le-ibm-linux-gnu"
+
+define i16 @outer1(i8* %ptr) {
+ %C = call i16 @inner1(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner1(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i16
+ ret i16 %E
+}
+
+define i32 @outer2(i8* %ptr) {
+ %C = call i32 @inner2(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner2(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer3(i16* %ptr) {
+ %C = call i32 @inner3(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner3
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner3(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer4(i16* %ptr) {
+ %C = call i32 @inner4(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner4
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner4(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i32
+ ret i32 %E
+}
+
+define i64 @outer5(i8* %ptr) {
+ %C = call i64 @inner5(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner5
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner5(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer6(i16* %ptr) {
+ %C = call i64 @inner6(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner6
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner6(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer7(i16* %ptr) {
+ %C = call i64 @inner7(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner7
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner7(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer8(i32* %ptr) {
+ %C = call i64 @inner8(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner8
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner8(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = zext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer9(i32* %ptr) {
+ %C = call i64 @inner9(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner9
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner9(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = sext i32 %L to i64
+ ret i64 %E
+}
diff --git a/test/Transforms/Inline/PowerPC/lit.local.cfg b/test/Transforms/Inline/PowerPC/lit.local.cfg
new file mode 100644
index 0000000000000..5d33887ff0a48
--- /dev/null
+++ b/test/Transforms/Inline/PowerPC/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'PowerPC' not in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/Inline/X86/ext.ll b/test/Transforms/Inline/X86/ext.ll
new file mode 100644
index 0000000000000..bffda38527998
--- /dev/null
+++ b/test/Transforms/Inline/X86/ext.ll
@@ -0,0 +1,201 @@
+; REQUIRES: asserts
+; RUN: opt -inline -mtriple=x86_64-unknown-unknown -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define i32 @outer1(i32* %ptr, i32 %i) {
+ %C = call i32 @inner1(i32* %ptr, i32 %i)
+ ret i32 %C
+}
+
+; zext from i32 to i64 is free.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 3
+; CHECK: NumInstructions: 4
+define i32 @inner1(i32* %ptr, i32 %i) {
+ %E = zext i32 %i to i64
+ %G = getelementptr inbounds i32, i32* %ptr, i64 %E
+ %L = load i32, i32* %G
+ ret i32 %L
+}
+
+define i16 @outer2(i8* %ptr) {
+ %C = call i16 @inner2(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner2(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i16
+ ret i16 %E
+}
+
+define i16 @outer3(i8* %ptr) {
+ %C = call i16 @inner3(i8* %ptr)
+ ret i16 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner3
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i16 @inner3(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i16
+ ret i16 %E
+}
+
+define i32 @outer4(i8* %ptr) {
+ %C = call i32 @inner4(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner4
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner4(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer5(i8* %ptr) {
+ %C = call i32 @inner5(i8* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner5
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner5(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer6(i16* %ptr) {
+ %C = call i32 @inner6(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner6
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner6(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i32
+ ret i32 %E
+}
+
+define i32 @outer7(i16* %ptr) {
+ %C = call i32 @inner7(i16* %ptr)
+ ret i32 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner7
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i32 @inner7(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i32
+ ret i32 %E
+}
+
+define i64 @outer8(i8* %ptr) {
+ %C = call i64 @inner8(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner8
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner8(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = zext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer9(i8* %ptr) {
+ %C = call i64 @inner9(i8* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner9
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner9(i8* %ptr) {
+ %L = load i8, i8* %ptr
+ %E = sext i8 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer10(i16* %ptr) {
+ %C = call i64 @inner10(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner10
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner10(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = zext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer11(i16* %ptr) {
+ %C = call i64 @inner11(i16* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner11
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner11(i16* %ptr) {
+ %L = load i16, i16* %ptr
+ %E = sext i16 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer12(i32* %ptr) {
+ %C = call i64 @inner12(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner12
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner12(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = zext i32 %L to i64
+ ret i64 %E
+}
+
+define i64 @outer13(i32* %ptr) {
+ %C = call i64 @inner13(i32* %ptr)
+ ret i64 %C
+}
+
+; It is an ExtLoad.
+; CHECK: Analyzing call of inner13
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 3
+define i64 @inner13(i32* %ptr) {
+ %L = load i32, i32* %ptr
+ %E = sext i32 %L to i64
+ ret i64 %E
+}
diff --git a/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll b/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll
index 3c4e08b5b515c..9053578175094 100644
--- a/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll
+++ b/test/Transforms/InstCombine/2017-07-07-UMul-ZExt.ll
@@ -1,7 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
-; CHECK: llvm.umul.with.overflow
define i32 @sterix(i32, i8, i64) {
+; CHECK-LABEL: @sterix(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CONV:%.*]] = zext i32 [[TMP0:%.*]] to i64
+; CHECK-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1:%.*]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[CONV1]], 1945964878
+; CHECK-NEXT: [[SH_PROM:%.*]] = trunc i64 [[TMP2:%.*]] to i32
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[MUL]], [[SH_PROM]]
+; CHECK-NEXT: [[CONV2:%.*]] = zext i32 [[SHR]] to i64
+; CHECK-NEXT: [[MUL3:%.*]] = mul nuw nsw i64 [[CONV]], [[CONV2]]
+; CHECK-NEXT: [[CONV6:%.*]] = and i64 [[MUL3]], 4294967295
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i64 [[CONV6]], [[MUL3]]
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
+; CHECK: lor.rhs:
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[MUL3]], [[TMP2]]
+; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[AND]] to i32
+; CHECK-NEXT: [[TOBOOL7:%.*]] = icmp eq i32 [[CONV4]], 0
+; CHECK-NEXT: [[PHITMP:%.*]] = zext i1 [[TOBOOL7]] to i32
+; CHECK-NEXT: br label [[LOR_END]]
+; CHECK: lor.end:
+; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[PHITMP]], [[LOR_RHS]] ]
+; CHECK-NEXT: ret i32 [[TMP3]]
+;
entry:
%conv = zext i32 %0 to i64
%conv1 = sext i8 %1 to i32
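The regenerated checks above no longer look for @llvm.umul.with.overflow: the i64 multiply now carries nuw (and nsw), so the overflow test folds away. The nuw half is easy to verify by hand, since both multiplicands are zero-extended from i32 (a worked bound reconstructing the reasoning, not quoted from the patch):

    (2^32 - 1) * (2^32 - 1) = 2^64 - 2^33 + 1 < 2^64

so the product of two zext'd i32 values can never wrap an unsigned i64.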
diff --git a/test/Transforms/InstCombine/and-not-or.ll b/test/Transforms/InstCombine/and-not-or.ll
deleted file mode 100644
index a42140be28052..0000000000000
--- a/test/Transforms/InstCombine/and-not-or.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: opt < %s -instcombine -S | grep "and i32 %x, %y" | count 4
-; RUN: opt < %s -instcombine -S | not grep "or"
-
-define i32 @func1(i32 %x, i32 %y) nounwind {
-entry:
- %n = xor i32 %y, -1
- %o = or i32 %n, %x
- %a = and i32 %o, %y
- ret i32 %a
-}
-
-define i32 @func2(i32 %x, i32 %y) nounwind {
-entry:
- %n = xor i32 %y, -1
- %o = or i32 %x, %n
- %a = and i32 %o, %y
- ret i32 %a
-}
-
-define i32 @func3(i32 %x, i32 %y) nounwind {
-entry:
- %n = xor i32 %y, -1
- %o = or i32 %n, %x
- %a = and i32 %y, %o
- ret i32 %a
-}
-
-define i32 @func4(i32 %x, i32 %y) nounwind {
-entry:
- %n = xor i32 %y, -1
- %o = or i32 %x, %n
- %a = and i32 %y, %o
- ret i32 %a
-}
diff --git a/test/Transforms/InstCombine/and.ll b/test/Transforms/InstCombine/and.ll
index 7bb9b95b31797..c12662d4db0e1 100644
--- a/test/Transforms/InstCombine/and.ll
+++ b/test/Transforms/InstCombine/and.ll
@@ -628,3 +628,195 @@ define i32 @test43(i32 %a, i32 %c, i32 %d) {
%and = and i32 %or, %xor
ret i32 %and
}
+
+; (~y | x) & y -> x & y
+define i32 @test44(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @test44(
+; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[A]]
+;
+ %n = xor i32 %y, -1
+ %o = or i32 %n, %x
+ %a = and i32 %o, %y
+ ret i32 %a
+}
+
+; (x | ~y) & y -> x & y
+define i32 @test45(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @test45(
+; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[A]]
+;
+ %n = xor i32 %y, -1
+ %o = or i32 %x, %n
+ %a = and i32 %o, %y
+ ret i32 %a
+}
+
+; y & (~y | x) -> y & x
+define i32 @test46(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @test46(
+; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[A]]
+;
+ %n = xor i32 %y, -1
+ %o = or i32 %n, %x
+ %a = and i32 %y, %o
+ ret i32 %a
+}
+
+; y & (x | ~y) -> y & x
+define i32 @test47(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @test47(
+; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[A]]
+;
+ %n = xor i32 %y, -1
+ %o = or i32 %x, %n
+ %a = and i32 %y, %o
+ ret i32 %a
+}
+
+; In the next 4 tests, vary the types and predicates for extra coverage.
+; (X & (Y | ~X)) -> (X & Y), where 'not' is an inverted cmp
+
+define i1 @and_orn_cmp_1(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @and_orn_cmp_1(
+; CHECK-NEXT: [[X:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[X]], [[Y]]
+; CHECK-NEXT: ret i1 [[AND]]
+;
+ %x = icmp sgt i32 %a, %b
+ %x_inv = icmp sle i32 %a, %b
+ %y = icmp ugt i32 %c, 42 ; thwart complexity-based ordering
+ %or = or i1 %y, %x_inv
+ %and = and i1 %x, %or
+ ret i1 %and
+}
+
+; Commute the 'and':
+; ((Y | ~X) & X) -> (X & Y), where 'not' is an inverted cmp
+
+define <2 x i1> @and_orn_cmp_2(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: @and_orn_cmp_2(
+; CHECK-NEXT: [[X:%.*]] = icmp sge <2 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt <2 x i32> [[C:%.*]], <i32 42, i32 47>
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i1> [[Y]], [[X]]
+; CHECK-NEXT: ret <2 x i1> [[AND]]
+;
+ %x = icmp sge <2 x i32> %a, %b
+ %x_inv = icmp slt <2 x i32> %a, %b
+ %y = icmp ugt <2 x i32> %c, <i32 42, i32 47> ; thwart complexity-based ordering
+ %or = or <2 x i1> %y, %x_inv
+ %and = and <2 x i1> %or, %x
+ ret <2 x i1> %and
+}
+
+; Commute the 'or':
+; (X & (~X | Y)) -> (X & Y), where 'not' is an inverted cmp
+
+define i1 @and_orn_cmp_3(i72 %a, i72 %b, i72 %c) {
+; CHECK-LABEL: @and_orn_cmp_3(
+; CHECK-NEXT: [[X:%.*]] = icmp ugt i72 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i72 [[C:%.*]], 42
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[X]], [[Y]]
+; CHECK-NEXT: ret i1 [[AND]]
+;
+ %x = icmp ugt i72 %a, %b
+ %x_inv = icmp ule i72 %a, %b
+ %y = icmp ugt i72 %c, 42 ; thwart complexity-based ordering
+ %or = or i1 %x_inv, %y
+ %and = and i1 %x, %or
+ ret i1 %and
+}
+
+; Commute the 'and':
+; ((~X | Y) & X) -> (X & Y), where 'not' is an inverted cmp
+
+define <3 x i1> @and_orn_cmp_4(<3 x i32> %a, <3 x i32> %b, <3 x i32> %c) {
+; CHECK-LABEL: @and_orn_cmp_4(
+; CHECK-NEXT: [[X:%.*]] = icmp eq <3 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt <3 x i32> [[C:%.*]], <i32 42, i32 43, i32 -1>
+; CHECK-NEXT: [[AND:%.*]] = and <3 x i1> [[Y]], [[X]]
+; CHECK-NEXT: ret <3 x i1> [[AND]]
+;
+ %x = icmp eq <3 x i32> %a, %b
+ %x_inv = icmp ne <3 x i32> %a, %b
+ %y = icmp ugt <3 x i32> %c, <i32 42, i32 43, i32 -1> ; thwart complexity-based ordering
+ %or = or <3 x i1> %x_inv, %y
+ %and = and <3 x i1> %or, %x
+ ret <3 x i1> %and
+}
+
+; In the next 4 tests, vary the types and predicates for extra coverage.
+; (~X & (Y | X)) -> (~X & Y), where 'not' is an inverted cmp
+
+define i1 @andn_or_cmp_1(i37 %a, i37 %b, i37 %c) {
+; CHECK-LABEL: @andn_or_cmp_1(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp sle i37 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i37 [[C:%.*]], 42
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[X_INV]], [[Y]]
+; CHECK-NEXT: ret i1 [[AND]]
+;
+ %x = icmp sgt i37 %a, %b
+ %x_inv = icmp sle i37 %a, %b
+ %y = icmp ugt i37 %c, 42 ; thwart complexity-based ordering
+ %or = or i1 %y, %x
+ %and = and i1 %x_inv, %or
+ ret i1 %and
+}
+
+; Commute the 'and':
+; ((Y | X) & ~X) -> (~X & Y), where 'not' is an inverted cmp
+
+define i1 @andn_or_cmp_2(i16 %a, i16 %b, i16 %c) {
+; CHECK-LABEL: @andn_or_cmp_2(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i16 [[C:%.*]], 42
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[Y]], [[X_INV]]
+; CHECK-NEXT: ret i1 [[AND]]
+;
+ %x = icmp sge i16 %a, %b
+ %x_inv = icmp slt i16 %a, %b
+ %y = icmp ugt i16 %c, 42 ; thwart complexity-based ordering
+ %or = or i1 %y, %x
+ %and = and i1 %or, %x_inv
+ ret i1 %and
+}
+
+; Commute the 'or':
+; (~X & (X | Y)) -> (~X & Y), where 'not' is an inverted cmp
+
+define <4 x i1> @andn_or_cmp_3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: @andn_or_cmp_3(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp ule <4 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt <4 x i32> [[C:%.*]], <i32 42, i32 0, i32 1, i32 -1>
+; CHECK-NEXT: [[AND:%.*]] = and <4 x i1> [[X_INV]], [[Y]]
+; CHECK-NEXT: ret <4 x i1> [[AND]]
+;
+ %x = icmp ugt <4 x i32> %a, %b
+ %x_inv = icmp ule <4 x i32> %a, %b
+ %y = icmp ugt <4 x i32> %c, <i32 42, i32 0, i32 1, i32 -1> ; thwart complexity-based ordering
+ %or = or <4 x i1> %x, %y
+ %and = and <4 x i1> %x_inv, %or
+ ret <4 x i1> %and
+}
+
+; Commute the 'and':
+; ((X | Y) & ~X) -> (~X & Y), where 'not' is an inverted cmp
+
+define i1 @andn_or_cmp_4(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @andn_or_cmp_4(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[Y]], [[X_INV]]
+; CHECK-NEXT: ret i1 [[AND]]
+;
+ %x = icmp eq i32 %a, %b
+ %x_inv = icmp ne i32 %a, %b
+ %y = icmp ugt i32 %c, 42 ; thwart complexity-based ordering
+ %or = or i1 %x, %y
+ %and = and i1 %or, %x_inv
+ ret i1 %and
+}
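All of the scalar and vector cases above reduce to one absorption identity; a one-line derivation by distributing the 'and' (standard Boolean algebra, not taken from the patch):

    X & (Y | ~X) = (X & Y) | (X & ~X) = (X & Y) | 0 = X & Y

The icmp variants encode ~X as the inverse predicate (e.g. sgt vs. sle, eq vs. ne), which is why each test pairs %x with a matching %x_inv rather than an explicit xor with -1.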
diff --git a/test/Transforms/InstCombine/and2.ll b/test/Transforms/InstCombine/and2.ll
index 001ac58891e46..15772d158f624 100644
--- a/test/Transforms/InstCombine/and2.ll
+++ b/test/Transforms/InstCombine/and2.ll
@@ -98,8 +98,7 @@ define i64 @test9(i64 %x) {
; combine -x & 1 into x & 1
define <2 x i64> @test9vec(<2 x i64> %x) {
; CHECK-LABEL: @test9vec(
-; CHECK-NEXT: [[SUB:%.*]] = sub nsw <2 x i64> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[SUB]], <i64 1, i64 1>
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> %x, <i64 1, i64 1>
; CHECK-NEXT: ret <2 x i64> [[AND]]
;
%sub = sub nsw <2 x i64> <i64 0, i64 0>, %x
@@ -119,6 +118,88 @@ define i64 @test10(i64 %x) {
ret i64 %add
}
+; (1 << x) & 1 --> zext(x == 0)
+
+define i8 @and1_shl1_is_cmp_eq_0(i8 %x) {
+; CHECK-LABEL: @and1_shl1_is_cmp_eq_0(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 %x, 0
+; CHECK-NEXT: [[AND:%.*]] = zext i1 [[TMP1]] to i8
+; CHECK-NEXT: ret i8 [[AND]]
+;
+ %sh = shl i8 1, %x
+ %and = and i8 %sh, 1
+ ret i8 %and
+}
+
+; Don't do it if the shift has another use.
+
+define i8 @and1_shl1_is_cmp_eq_0_multiuse(i8 %x) {
+; CHECK-LABEL: @and1_shl1_is_cmp_eq_0_multiuse(
+; CHECK-NEXT: [[SH:%.*]] = shl i8 1, %x
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[SH]], 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SH]], [[AND]]
+; CHECK-NEXT: ret i8 [[ADD]]
+;
+ %sh = shl i8 1, %x
+ %and = and i8 %sh, 1
+ %add = add i8 %sh, %and
+ ret i8 %add
+}
+
+; (1 << x) & 1 --> zext(x == 0)
+
+define <2 x i8> @and1_shl1_is_cmp_eq_0_vec(<2 x i8> %x) {
+; CHECK-LABEL: @and1_shl1_is_cmp_eq_0_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT: [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
+; CHECK-NEXT: ret <2 x i8> [[AND]]
+;
+ %sh = shl <2 x i8> <i8 1, i8 1>, %x
+ %and = and <2 x i8> %sh, <i8 1, i8 1>
+ ret <2 x i8> %and
+}
+
+; (1 >> x) & 1 --> zext(x == 0)
+
+define i8 @and1_lshr1_is_cmp_eq_0(i8 %x) {
+; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 %x, 0
+; CHECK-NEXT: [[AND:%.*]] = zext i1 [[TMP1]] to i8
+; CHECK-NEXT: ret i8 [[AND]]
+;
+ %sh = lshr i8 1, %x
+ %and = and i8 %sh, 1
+ ret i8 %and
+}
+
+; Don't do it if the shift has another use.
+
+define i8 @and1_lshr1_is_cmp_eq_0_multiuse(i8 %x) {
+; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_multiuse(
+; CHECK-NEXT: [[SH:%.*]] = lshr i8 1, %x
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[SH]], 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SH]], [[AND]]
+; CHECK-NEXT: ret i8 [[ADD]]
+;
+ %sh = lshr i8 1, %x
+ %and = and i8 %sh, 1
+ %add = add i8 %sh, %and
+ ret i8 %add
+}
+
+; (1 >> x) & 1 --> zext(x == 0)
+
+define <2 x i8> @and1_lshr1_is_cmp_eq_0_vec(<2 x i8> %x) {
+; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT: [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
+; CHECK-NEXT: ret <2 x i8> [[AND]]
+;
+ %sh = lshr <2 x i8> <i8 1, i8 1>, %x
+ %and = and <2 x i8> %sh, <i8 1, i8 1>
+ ret <2 x i8> %and
+}
+
; The add in this test is unnecessary because the LSBs of the LHS are 0 and the 'and' only consumes bits from those LSBs. It doesn't matter what happens to the upper bits.
define i32 @test11(i32 %a, i32 %b) {
; CHECK-LABEL: @test11(
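The shift-and-mask folds added above rest on a small bit-level fact: 1 << x has its only possible set bit at position x, and 1 >> x is 1 exactly when x == 0, so masking either with 1 yields a nonzero result exactly when x == 0. A before/after sketch under that reading (illustrative, mirroring the shl case):

    ; before:
    %sh  = shl i8 1, %x          ; the only bit that can be set is bit x
    %and = and i8 %sh, 1         ; bit 0 survives only when x == 0
    ; after:
    %cmp = icmp eq i8 %x, 0
    %res = zext i1 %cmp to i8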
diff --git a/test/Transforms/InstCombine/element-atomic-memintrins.ll b/test/Transforms/InstCombine/element-atomic-memintrins.ll
new file mode 100644
index 0000000000000..2e3bfd7b721d6
--- /dev/null
+++ b/test/Transforms/InstCombine/element-atomic-memintrins.ll
@@ -0,0 +1,98 @@
+;; Placeholder tests that will fail once the element atomic @llvm.mem[move|set] intrinsics have
+;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
+;; verify that instcombine handles these intrinsics properly once they have been
+;; added to that class hierarchy.
+
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+;; ---- memset -----
+
+; Ensure 0-length memset isn't removed
+define void @test_memset_zero_length(i8* %dest) {
+ ; CHECK-LABEL: test_memset_zero_length
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 0, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 0, i32 1)
+ ret void
+}
+
+; Ensure that small-sized memsets don't convert to stores
+define void @test_memset_to_store(i8* %dest) {
+ ; CHECK-LABEL: test_memset_to_store
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 2, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 4, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 8, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 2, i32 1)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 4, i32 1)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 8, i32 1)
+ ret void
+}
+
+declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nocapture writeonly, i8, i32, i32) nounwind argmemonly
+
+
+;; =========================================
+;; ----- memmove ------
+
+; memmove from a global constant source does not become memcpy
+@gconst = constant [8 x i8] c"0123456\00"
+define void @test_memmove_to_memcpy(i8* %dest) {
+ ; CHECK-LABEL: test_memmove_to_memcpy
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 getelementptr inbounds ([8 x i8], [8 x i8]* @gconst, i64 0, i64 0), i32 8, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 getelementptr inbounds ([8 x i8], [8 x i8]* @gconst, i64 0, i64 0), i32 8, i32 1)
+ ret void
+}
+
+define void @test_memmove_zero_length(i8* %dest, i8* %src) {
+ ; CHECK-LABEL: test_memmove_zero_length
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 0, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 0, i32 2)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 0, i32 4)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 0, i32 8)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 0, i32 16)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 0, i32 1)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 0, i32 2)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 0, i32 4)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 0, i32 8)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 0, i32 16)
+ ret void
+}
+
+; memmove with src==dest is not yet removed
+define void @test_memmove_removed(i8* %srcdest, i32 %sz) {
+ ; CHECK-LABEL: test_memmove_removed
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %srcdest, i8* align 1 %srcdest, i32 %sz, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %srcdest, i8* align 2 %srcdest, i32 %sz, i32 2)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %srcdest, i8* align 4 %srcdest, i32 %sz, i32 4)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %srcdest, i8* align 8 %srcdest, i32 %sz, i32 8)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %srcdest, i8* align 16 %srcdest, i32 %sz, i32 16)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %srcdest, i8* align 1 %srcdest, i32 %sz, i32 1)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %srcdest, i8* align 2 %srcdest, i32 %sz, i32 2)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %srcdest, i8* align 4 %srcdest, i32 %sz, i32 4)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %srcdest, i8* align 8 %srcdest, i32 %sz, i32 8)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %srcdest, i8* align 16 %srcdest, i32 %sz, i32 16)
+ ret void
+}
+
+; memmove with a small constant length is not yet converted to a load/store pair
+define void @test_memmove_loadstore(i8* %dest, i8* %src) {
+ ; CHECK-LABEL: test_memmove_loadstore
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 2, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 4, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 8, i32 1)
+ ; CHECK-NEXT: ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 2, i32 1)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 4, i32 1)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 8, i32 1)
+ ret void
+}
+
+declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32) nounwind argmemonly
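Because these are placeholder tests, every CHECK line pins today's no-transform behavior. Once the intrinsics join the MemIntrinsic hierarchy, the folds would presumably mirror the ordinary memset/memmove ones; a speculative sketch (an assumption about future behavior, not part of this patch) of what the single-byte case in @test_memset_to_store might then become:

    ; hypothetical future output for the length-1, element-size-1 memset:
    store atomic i8 1, i8* %dest unordered, align 1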
diff --git a/test/Transforms/InstCombine/icmp-logical.ll b/test/Transforms/InstCombine/icmp-logical.ll
index faae2016e2075..aa95cc5a13164 100644
--- a/test/Transforms/InstCombine/icmp-logical.ll
+++ b/test/Transforms/InstCombine/icmp-logical.ll
@@ -1,159 +1,138 @@
; RUN: opt -instcombine -S -o - %s | FileCheck %s
define i1 @masked_and_notallzeroes(i32 %A) {
-; CHECK-LABEL: @masked_and_notallzeroes
-; CHECK: [[MASK:%.*]] = and i32 %A, 7
-; CHECK: icmp ne i32 [[MASK]], 0
-; CHECK-NOT: and i32 %A, 39
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_and_notallzeroes(
+; CHECK-NEXT: [[MASK1:%.*]] = and i32 %A, 7
+; CHECK-NEXT: [[TST1:%.*]] = icmp ne i32 [[MASK1]], 0
+; CHECK-NEXT: ret i1 [[TST1]]
+;
%mask1 = and i32 %A, 7
%tst1 = icmp ne i32 %mask1, 0
-
%mask2 = and i32 %A, 39
%tst2 = icmp ne i32 %mask2, 0
-
%res = and i1 %tst1, %tst2
ret i1 %res
}
define i1 @masked_or_allzeroes(i32 %A) {
-; CHECK-LABEL: @masked_or_allzeroes
-; CHECK: [[MASK:%.*]] = and i32 %A, 7
-; CHECK: icmp eq i32 [[MASK]], 0
-; CHECK-NOT: and i32 %A, 39
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_or_allzeroes(
+; CHECK-NEXT: [[MASK1:%.*]] = and i32 %A, 7
+; CHECK-NEXT: [[TST1:%.*]] = icmp eq i32 [[MASK1]], 0
+; CHECK-NEXT: ret i1 [[TST1]]
+;
%mask1 = and i32 %A, 7
%tst1 = icmp eq i32 %mask1, 0
-
%mask2 = and i32 %A, 39
%tst2 = icmp eq i32 %mask2, 0
-
%res = or i1 %tst1, %tst2
ret i1 %res
}
define i1 @masked_and_notallones(i32 %A) {
-; CHECK-LABEL: @masked_and_notallones
-; CHECK: [[MASK:%.*]] = and i32 %A, 7
-; CHECK: icmp ne i32 [[MASK]], 7
-; CHECK-NOT: and i32 %A, 39
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_and_notallones(
+; CHECK-NEXT: [[MASK1:%.*]] = and i32 %A, 7
+; CHECK-NEXT: [[TST1:%.*]] = icmp ne i32 [[MASK1]], 7
+; CHECK-NEXT: ret i1 [[TST1]]
+;
%mask1 = and i32 %A, 7
%tst1 = icmp ne i32 %mask1, 7
-
%mask2 = and i32 %A, 39
%tst2 = icmp ne i32 %mask2, 39
-
%res = and i1 %tst1, %tst2
ret i1 %res
}
define i1 @masked_or_allones(i32 %A) {
-; CHECK-LABEL: @masked_or_allones
-; CHECK: [[MASK:%.*]] = and i32 %A, 7
-; CHECK: icmp eq i32 [[MASK]], 7
-; CHECK-NOT: and i32 %A, 39
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_or_allones(
+; CHECK-NEXT: [[MASK1:%.*]] = and i32 %A, 7
+; CHECK-NEXT: [[TST1:%.*]] = icmp eq i32 [[MASK1]], 7
+; CHECK-NEXT: ret i1 [[TST1]]
+;
%mask1 = and i32 %A, 7
%tst1 = icmp eq i32 %mask1, 7
-
%mask2 = and i32 %A, 39
%tst2 = icmp eq i32 %mask2, 39
-
%res = or i1 %tst1, %tst2
ret i1 %res
}
define i1 @masked_and_notA(i32 %A) {
-; CHECK-LABEL: @masked_and_notA
-; CHECK: [[MASK:%.*]] = and i32 %A, 39
-; CHECK: icmp ne i32 [[MASK]], %A
-; CHECK-NOT: and i32 %A, 7
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_and_notA(
+; CHECK-NEXT: [[MASK2:%.*]] = and i32 %A, 39
+; CHECK-NEXT: [[TST2:%.*]] = icmp ne i32 [[MASK2]], %A
+; CHECK-NEXT: ret i1 [[TST2]]
+;
%mask1 = and i32 %A, 7
%tst1 = icmp ne i32 %mask1, %A
-
%mask2 = and i32 %A, 39
%tst2 = icmp ne i32 %mask2, %A
-
%res = and i1 %tst1, %tst2
ret i1 %res
}
define i1 @masked_or_A(i32 %A) {
-; CHECK-LABEL: @masked_or_A
-; CHECK: [[MASK:%.*]] = and i32 %A, 39
-; CHECK: icmp eq i32 [[MASK]], %A
-; CHECK-NOT: and i32 %A, 7
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_or_A(
+; CHECK-NEXT: [[MASK2:%.*]] = and i32 %A, 39
+; CHECK-NEXT: [[TST2:%.*]] = icmp eq i32 [[MASK2]], %A
+; CHECK-NEXT: ret i1 [[TST2]]
+;
%mask1 = and i32 %A, 7
%tst1 = icmp eq i32 %mask1, %A
-
%mask2 = and i32 %A, 39
%tst2 = icmp eq i32 %mask2, %A
-
%res = or i1 %tst1, %tst2
ret i1 %res
}
define i1 @masked_or_allzeroes_notoptimised(i32 %A) {
-; CHECK-LABEL: @masked_or_allzeroes_notoptimised
-; CHECK: [[MASK:%.*]] = and i32 %A, 15
-; CHECK: icmp eq i32 [[MASK]], 0
-; CHECK: [[MASK:%.*]] = and i32 %A, 39
-; CHECK: icmp eq i32 [[MASK]], 0
-; CHECK: ret i1
-
+; CHECK-LABEL: @masked_or_allzeroes_notoptimised(
+; CHECK-NEXT: [[MASK1:%.*]] = and i32 %A, 15
+; CHECK-NEXT: [[TST1:%.*]] = icmp eq i32 [[MASK1]], 0
+; CHECK-NEXT: [[MASK2:%.*]] = and i32 %A, 39
+; CHECK-NEXT: [[TST2:%.*]] = icmp eq i32 [[MASK2]], 0
+; CHECK-NEXT: [[RES:%.*]] = or i1 [[TST1]], [[TST2]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
%mask1 = and i32 %A, 15
%tst1 = icmp eq i32 %mask1, 0
-
%mask2 = and i32 %A, 39
%tst2 = icmp eq i32 %mask2, 0
-
%res = or i1 %tst1, %tst2
ret i1 %res
}
define i1 @nomask_lhs(i32 %in) {
-; CHECK-LABEL: @nomask_lhs
-; CHECK: [[MASK:%.*]] = and i32 %in, 1
-; CHECK: icmp eq i32 [[MASK]], 0
-; CHECK-NOT: icmp
-; CHECK: ret i1
+; CHECK-LABEL: @nomask_lhs(
+; CHECK-NEXT: [[MASKED:%.*]] = and i32 %in, 1
+; CHECK-NEXT: [[TST2:%.*]] = icmp eq i32 [[MASKED]], 0
+; CHECK-NEXT: ret i1 [[TST2]]
+;
%tst1 = icmp eq i32 %in, 0
-
%masked = and i32 %in, 1
%tst2 = icmp eq i32 %masked, 0
-
%val = or i1 %tst1, %tst2
ret i1 %val
}
-
define i1 @nomask_rhs(i32 %in) {
-; CHECK-LABEL: @nomask_rhs
-; CHECK: [[MASK:%.*]] = and i32 %in, 1
-; CHECK: icmp eq i32 [[MASK]], 0
-; CHECK-NOT: icmp
-; CHECK: ret i1
+; CHECK-LABEL: @nomask_rhs(
+; CHECK-NEXT: [[MASKED:%.*]] = and i32 %in, 1
+; CHECK-NEXT: [[TST1:%.*]] = icmp eq i32 [[MASKED]], 0
+; CHECK-NEXT: ret i1 [[TST1]]
+;
%masked = and i32 %in, 1
%tst1 = icmp eq i32 %masked, 0
-
%tst2 = icmp eq i32 %in, 0
-
%val = or i1 %tst1, %tst2
ret i1 %val
}
+; TODO: This test simplifies to a constant, so the functionality and test could be in InstSimplify.
+
define i1 @fold_mask_cmps_to_false(i32 %x) {
-; CHECK-LABEL: @fold_mask_cmps_to_false
-; CHECK: ret i1 false
+; CHECK-LABEL: @fold_mask_cmps_to_false(
+; CHECK-NEXT: ret i1 false
+;
%1 = and i32 %x, 2147483647
%2 = icmp eq i32 %1, 0
%3 = icmp eq i32 %x, 2147483647
@@ -161,12 +140,46 @@ define i1 @fold_mask_cmps_to_false(i32 %x) {
ret i1 %4
}
+; TODO: This test simplifies to a constant, so the functionality and test could be in InstSimplify.
+
define i1 @fold_mask_cmps_to_true(i32 %x) {
-; CHECK-LABEL: @fold_mask_cmps_to_true
-; CHECK: ret i1 true
+; CHECK-LABEL: @fold_mask_cmps_to_true(
+; CHECK-NEXT: ret i1 true
+;
%1 = and i32 %x, 2147483647
%2 = icmp ne i32 %1, 0
%3 = icmp ne i32 %x, 2147483647
%4 = or i1 %3, %2
ret i1 %4
}
+
+; PR32401 - https://bugs.llvm.org/show_bug.cgi?id=32401
+
+define i1 @cmpeq_bitwise(i8 %a, i8 %b, i8 %c, i8 %d) {
+; CHECK-LABEL: @cmpeq_bitwise(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 %c, %d
+; CHECK-NEXT: [[CMP:%.*]] = and i1 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i8 %a, %b
+ %xor2 = xor i8 %c, %d
+ %or = or i8 %xor1, %xor2
+ %cmp = icmp eq i8 %or, 0
+ ret i1 %cmp
+}
+
+define <2 x i1> @cmpne_bitwise(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
+; CHECK-LABEL: @cmpne_bitwise(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i64> %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <2 x i64> %c, %d
+; CHECK-NEXT: [[CMP:%.*]] = or <2 x i1> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %xor1 = xor <2 x i64> %a, %b
+ %xor2 = xor <2 x i64> %c, %d
+ %or = or <2 x i64> %xor1, %xor2
+ %cmp = icmp ne <2 x i64> %or, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
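The PR32401 folds chain two equality facts: a xor is zero iff its operands are equal, and an 'or' is zero iff both operands are zero. Written out (standard reasoning, not quoted from the bug report):

    ((a ^ b) | (c ^ d)) == 0  <=>  a ^ b == 0 and c ^ d == 0  <=>  a == b and c == d

The vector ne variant is the same chain negated, which is why it produces an 'or' of two ne compares.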
diff --git a/test/Transforms/InstCombine/or-xor.ll b/test/Transforms/InstCombine/or-xor.ll
index 947971c6c83b0..be64f51b6c4c5 100644
--- a/test/Transforms/InstCombine/or-xor.ll
+++ b/test/Transforms/InstCombine/or-xor.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -instcombine < %s | FileCheck %s
-define i32 @test1(i32 %x, i32 %y) nounwind {
+; X | ~(X | Y) --> X | ~Y
+
+define i32 @test1(i32 %x, i32 %y) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[Y_NOT:%.*]] = xor i32 %y, -1
; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y_NOT]], %x
@@ -13,7 +15,10 @@ define i32 @test1(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test2(i32 %x, i32 %y) nounwind {
+; Commute (rename) the inner 'or' operands:
+; Y | ~(X | Y) --> ~X | Y
+
+define i32 @test2(i32 %x, i32 %y) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 %x, -1
; CHECK-NEXT: [[Z:%.*]] = or i32 [[X_NOT]], %y
@@ -25,7 +30,9 @@ define i32 @test2(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test3(i32 %x, i32 %y) nounwind {
+; X | ~(X ^ Y) --> X | ~Y
+
+define i32 @test3(i32 %x, i32 %y) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[Y_NOT:%.*]] = xor i32 %y, -1
; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y_NOT]], %x
@@ -37,7 +44,10 @@ define i32 @test3(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test4(i32 %x, i32 %y) nounwind {
+; Commute (rename) the 'xor' operands:
+; Y | ~(X ^ Y) --> ~X | Y
+
+define i32 @test4(i32 %x, i32 %y) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 %x, -1
; CHECK-NEXT: [[Z:%.*]] = or i32 [[X_NOT]], %y
@@ -49,7 +59,7 @@ define i32 @test4(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test5(i32 %x, i32 %y) nounwind {
+define i32 @test5(i32 %x, i32 %y) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: ret i32 -1
;
@@ -59,7 +69,7 @@ define i32 @test5(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test6(i32 %x, i32 %y) nounwind {
+define i32 @test6(i32 %x, i32 %y) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: ret i32 -1
;
@@ -69,7 +79,7 @@ define i32 @test6(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test7(i32 %x, i32 %y) nounwind {
+define i32 @test7(i32 %x, i32 %y) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[Z:%.*]] = or i32 %x, %y
; CHECK-NEXT: ret i32 [[Z]]
@@ -79,7 +89,7 @@ define i32 @test7(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test8(i32 %x, i32 %y) nounwind {
+define i32 @test8(i32 %x, i32 %y) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 %x, -1
; CHECK-NEXT: [[Z:%.*]] = or i32 [[X_NOT]], %y
@@ -91,7 +101,7 @@ define i32 @test8(i32 %x, i32 %y) nounwind {
ret i32 %z
}
-define i32 @test9(i32 %x, i32 %y) nounwind {
+define i32 @test9(i32 %x, i32 %y) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: [[Y_NOT:%.*]] = xor i32 %y, -1
; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y_NOT]], %x
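The new fold comments in or-xor.ll follow from De Morgan plus distribution; written out once (standard Boolean algebra, not quoted from the patch):

    X | ~(X | Y) = X | (~X & ~Y) = (X | ~X) & (X | ~Y) = X | ~Y

The xor form X | ~(X ^ Y) reduces the same way after expanding the xnor. Dropping the unused nounwind attributes is incidental cleanup.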
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
index 764fe4503b5e1..fb56449ba4d46 100644
--- a/test/Transforms/InstCombine/or.ll
+++ b/test/Transforms/InstCombine/or.ll
@@ -397,14 +397,74 @@ define <2 x i132> @orsext_to_sel_vec_swap(<2 x i132> %x, <2 x i1> %y) {
ret <2 x i132> %or
}
-define i32 @test39(i32 %a, i32 %b) {
-; CHECK-LABEL: @test39(
-; CHECK-NEXT: [[OR:%.*]] = or i32 %b, %a
+; (~A & B) | A --> A | B
+
+define i32 @test39a(i32 %a, float %b) {
+; CHECK-LABEL: @test39a(
+; CHECK-NEXT: [[A1:%.*]] = mul i32 %a, 42
+; CHECK-NEXT: [[B1:%.*]] = bitcast float %b to i32
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[A1]], [[B1]]
; CHECK-NEXT: ret i32 [[OR]]
;
- %xor = xor i32 %a, -1
- %and = and i32 %xor, %b
- %or = or i32 %and, %a
+ %a1 = mul i32 %a, 42 ; thwart complexity-based ordering
+ %b1 = bitcast float %b to i32 ; thwart complexity-based ordering
+ %nota = xor i32 %a1, -1
+ %and = and i32 %nota, %b1
+ %or = or i32 %and, %a1
+ ret i32 %or
+}
+
+; Commute 'and' operands:
+; (B & ~A) | A --> A | B
+
+define i32 @test39b(i32 %a, float %b) {
+; CHECK-LABEL: @test39b(
+; CHECK-NEXT: [[A1:%.*]] = mul i32 %a, 42
+; CHECK-NEXT: [[B1:%.*]] = bitcast float %b to i32
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[A1]], [[B1]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %a1 = mul i32 %a, 42 ; thwart complexity-based ordering
+ %b1 = bitcast float %b to i32 ; thwart complexity-based ordering
+ %nota = xor i32 %a1, -1
+ %and = and i32 %b1, %nota
+ %or = or i32 %and, %a1
+ ret i32 %or
+}
+
+; Commute 'or' operands:
+; A | (~A & B) --> A | B
+
+define i32 @test39c(i32 %a, float %b) {
+; CHECK-LABEL: @test39c(
+; CHECK-NEXT: [[A1:%.*]] = mul i32 %a, 42
+; CHECK-NEXT: [[B1:%.*]] = bitcast float %b to i32
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[A1]], [[B1]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %a1 = mul i32 %a, 42 ; thwart complexity-based ordering
+ %b1 = bitcast float %b to i32 ; thwart complexity-based ordering
+ %nota = xor i32 %a1, -1
+ %and = and i32 %nota, %b1
+ %or = or i32 %a1, %and
+ ret i32 %or
+}
+
+; Commute 'and' operands:
+; A | (B & ~A) --> A | B
+
+define i32 @test39d(i32 %a, float %b) {
+; CHECK-LABEL: @test39d(
+; CHECK-NEXT: [[A1:%.*]] = mul i32 %a, 42
+; CHECK-NEXT: [[B1:%.*]] = bitcast float %b to i32
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[A1]], [[B1]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %a1 = mul i32 %a, 42 ; thwart complexity-based ordering
+ %b1 = bitcast float %b to i32 ; thwart complexity-based ordering
+ %nota = xor i32 %a1, -1
+ %and = and i32 %b1, %nota
+ %or = or i32 %a1, %and
ret i32 %or
}
@@ -456,60 +516,6 @@ define i32 @test40d(i32 %a, i32 %b) {
ret i32 %or
}
-define i32 @test41(i32 %a, i32 %b) {
-; CHECK-LABEL: @test41(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 %a, -1
-; CHECK-NEXT: [[OR:%.*]] = xor i32 [[TMP1]], %b
-; CHECK-NEXT: ret i32 [[OR]]
-;
- %and = and i32 %a, %b
- %nega = xor i32 %a, -1
- %xor = xor i32 %nega, %b
- %or = or i32 %and, %xor
- ret i32 %or
-}
-
-; (~A ^ B) | (A & B) -> (~A ^ B)
-
-define i32 @test42(i32 %a, i32 %b) {
-; CHECK-LABEL: @test42(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 %a, -1
-; CHECK-NEXT: [[OR:%.*]] = xor i32 [[TMP1]], %b
-; CHECK-NEXT: ret i32 [[OR]]
-;
- %nega = xor i32 %a, -1
- %xor = xor i32 %nega, %b
- %and = and i32 %a, %b
- %or = or i32 %xor, %and
- ret i32 %or
-}
-
-define i32 @test42_commuted_and(i32 %a, i32 %b) {
-; CHECK-LABEL: @test42_commuted_and(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 %a, -1
-; CHECK-NEXT: [[OR:%.*]] = xor i32 [[TMP1]], %b
-; CHECK-NEXT: ret i32 [[OR]]
-;
- %nega = xor i32 %a, -1
- %xor = xor i32 %nega, %b
- %and = and i32 %b, %a
- %or = or i32 %xor, %and
- ret i32 %or
-}
-
-define i32 @test42_commuted_xor(i32 %a, i32 %b) {
-; CHECK-LABEL: @test42_commuted_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 %a, -1
-; CHECK-NEXT: [[OR:%.*]] = xor i32 [[TMP1]], %b
-; CHECK-NEXT: ret i32 [[OR]]
-;
- %nega = xor i32 %a, -1
- %xor = xor i32 %b, %nega
- %and = and i32 %a, %b
- %or = or i32 %xor, %and
- ret i32 %or
-}
-
define i32 @test45(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test45(
; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, %z
@@ -648,41 +654,146 @@ final:
ret <2 x i32> %value
}
-define i8 @test51(i8 %a, i8 %b, i8 %c) {
-; CHECK-LABEL: @test51(
-; CHECK-NEXT: [[W:%.*]] = mul i8 [[B:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[X:%.*]] = or i8 [[W]], [[A:%.*]]
-; CHECK-NEXT: ret i8 [[X]]
+; In the next 4 tests, vary the types and predicates for extra coverage.
+; (X | (Y & ~X)) -> (X | Y), where 'not' is an inverted cmp
+
+define i1 @or_andn_cmp_1(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @or_andn_cmp_1(
+; CHECK-NEXT: [[X:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[X]], [[Y]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %x = icmp sgt i32 %a, %b
+ %x_inv = icmp sle i32 %a, %b
+ %y = icmp ugt i32 %c, 42 ; thwart complexity-based ordering
+ %and = and i1 %y, %x_inv
+ %or = or i1 %x, %and
+ ret i1 %or
+}
+
+; Commute the 'or':
+; ((Y & ~X) | X) -> (X | Y), where 'not' is an inverted cmp
+
+define <2 x i1> @or_andn_cmp_2(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: @or_andn_cmp_2(
+; CHECK-NEXT: [[X:%.*]] = icmp sge <2 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt <2 x i32> [[C:%.*]], <i32 42, i32 47>
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i1> [[Y]], [[X]]
+; CHECK-NEXT: ret <2 x i1> [[OR]]
+;
+ %x = icmp sge <2 x i32> %a, %b
+ %x_inv = icmp slt <2 x i32> %a, %b
+ %y = icmp ugt <2 x i32> %c, <i32 42, i32 47> ; thwart complexity-based ordering
+ %and = and <2 x i1> %y, %x_inv
+ %or = or <2 x i1> %and, %x
+ ret <2 x i1> %or
+}
+
+; Commute the 'and':
+; (X | (~X & Y)) -> (X | Y), where 'not' is an inverted cmp
+
+define i1 @or_andn_cmp_3(i72 %a, i72 %b, i72 %c) {
+; CHECK-LABEL: @or_andn_cmp_3(
+; CHECK-NEXT: [[X:%.*]] = icmp ugt i72 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i72 [[C:%.*]], 42
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[X]], [[Y]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %x = icmp ugt i72 %a, %b
+ %x_inv = icmp ule i72 %a, %b
+ %y = icmp ugt i72 %c, 42 ; thwart complexity-based ordering
+ %and = and i1 %x_inv, %y
+ %or = or i1 %x, %and
+ ret i1 %or
+}
+
+; Commute the 'or':
+; ((~X & Y) | X) -> (X | Y), where 'not' is an inverted cmp
+
+define <3 x i1> @or_andn_cmp_4(<3 x i32> %a, <3 x i32> %b, <3 x i32> %c) {
+; CHECK-LABEL: @or_andn_cmp_4(
+; CHECK-NEXT: [[X:%.*]] = icmp eq <3 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt <3 x i32> [[C:%.*]], <i32 42, i32 43, i32 -1>
+; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[Y]], [[X]]
+; CHECK-NEXT: ret <3 x i1> [[OR]]
+;
+ %x = icmp eq <3 x i32> %a, %b
+ %x_inv = icmp ne <3 x i32> %a, %b
+ %y = icmp ugt <3 x i32> %c, <i32 42, i32 43, i32 -1> ; thwart complexity-based ordering
+ %and = and <3 x i1> %x_inv, %y
+ %or = or <3 x i1> %and, %x
+ ret <3 x i1> %or
+}
+
+; In the next 4 tests, vary the types and predicates for extra coverage.
+; (~X | (Y & X)) -> (~X | Y), where 'not' is an inverted cmp
+
+define i1 @orn_and_cmp_1(i37 %a, i37 %b, i37 %c) {
+; CHECK-LABEL: @orn_and_cmp_1(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp sle i37 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i37 [[C:%.*]], 42
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[X_INV]], [[Y]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %x = icmp sgt i37 %a, %b
+ %x_inv = icmp sle i37 %a, %b
+ %y = icmp ugt i37 %c, 42 ; thwart complexity-based ordering
+ %and = and i1 %y, %x
+ %or = or i1 %x_inv, %and
+ ret i1 %or
+}
+
+; Commute the 'or':
+; ((Y & X) | ~X) -> (~X | Y), where 'not' is an inverted cmp
+
+define i1 @orn_and_cmp_2(i16 %a, i16 %b, i16 %c) {
+; CHECK-LABEL: @orn_and_cmp_2(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i16 [[C:%.*]], 42
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[Y]], [[X_INV]]
+; CHECK-NEXT: ret i1 [[OR]]
;
- %w = mul i8 %b, %c
- %z = xor i8 %a, -1
- %y = and i8 %w, %z
- %x = or i8 %y, %a
- ret i8 %x
+ %x = icmp sge i16 %a, %b
+ %x_inv = icmp slt i16 %a, %b
+ %y = icmp ugt i16 %c, 42 ; thwart complexity-based ordering
+ %and = and i1 %y, %x
+ %or = or i1 %and, %x_inv
+ ret i1 %or
}
-define i8 @test52(i8 %a, i8 %b, i8 %c) {
-; CHECK-LABEL: @test52(
-; CHECK-NEXT: [[W:%.*]] = mul i8 [[B:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[X:%.*]] = or i8 [[W]], [[A:%.*]]
-; CHECK-NEXT: ret i8 [[X]]
+; Commute the 'and':
+; (~X | (X & Y)) -> (~X | Y), where 'not' is an inverted cmp
+
+define <4 x i1> @orn_and_cmp_3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: @orn_and_cmp_3(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp ule <4 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt <4 x i32> [[C:%.*]], <i32 42, i32 0, i32 1, i32 -1>
+; CHECK-NEXT: [[OR:%.*]] = or <4 x i1> [[X_INV]], [[Y]]
+; CHECK-NEXT: ret <4 x i1> [[OR]]
;
- %w = mul i8 %b, %c
- %z = xor i8 %w, -1
- %y = and i8 %z, %a
- %x = or i8 %w, %y
- ret i8 %x
+ %x = icmp ugt <4 x i32> %a, %b
+ %x_inv = icmp ule <4 x i32> %a, %b
+ %y = icmp ugt <4 x i32> %c, <i32 42, i32 0, i32 1, i32 -1> ; thwart complexity-based ordering
+ %and = and <4 x i1> %x, %y
+ %or = or <4 x i1> %x_inv, %and
+ ret <4 x i1> %or
}
-define i8 @test53(i8 %a, i8 %b, i8 %c) {
-; CHECK-LABEL: @test53(
-; CHECK-NEXT: [[W:%.*]] = mul i8 [[B:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[X:%.*]] = or i8 [[W]], [[A:%.*]]
-; CHECK-NEXT: ret i8 [[X]]
+; Commute the 'or':
+; ((X & Y) | ~X) -> (~X | Y), where 'not' is an inverted cmp
+
+define i1 @orn_and_cmp_4(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @orn_and_cmp_4(
+; CHECK-NEXT: [[X_INV:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = icmp ugt i32 [[C:%.*]], 42
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[Y]], [[X_INV]]
+; CHECK-NEXT: ret i1 [[OR]]
;
- %w = mul i8 %b, %c
- %z = xor i8 %w, -1
- %y = and i8 %z, %a
- %x = or i8 %w, %y
- ret i8 %x
+ %x = icmp eq i32 %a, %b
+ %x_inv = icmp ne i32 %a, %b
+ %y = icmp ugt i32 %c, 42 ; thwart complexity-based ordering
+ %and = and i1 %x, %y
+ %or = or i1 %and, %x_inv
+ ret i1 %or
}
diff --git a/test/Transforms/InstCombine/pr33765.ll b/test/Transforms/InstCombine/pr33765.ll
new file mode 100644
index 0000000000000..99ed0d13b5cf5
--- /dev/null
+++ b/test/Transforms/InstCombine/pr33765.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S %s -instcombine | FileCheck %s
+
+@glob = external global i16
+
+define void @patatino(i8 %beth) {
+; CHECK-LABEL: @patatino(
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[BETH:%.*]] to i32
+; CHECK-NEXT: br i1 undef, label [[IF_THEN9:%.*]], label [[IF_THEN9]]
+; CHECK: if.then9:
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[CONV]], [[CONV]]
+; CHECK-NEXT: [[TINKY:%.*]] = load i16, i16* @glob, align 2
+; CHECK-NEXT: [[CONV131:%.*]] = zext i16 [[TINKY]] to i32
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[MUL]], [[CONV131]]
+; CHECK-NEXT: [[CONV14:%.*]] = trunc i32 [[AND]] to i16
+; CHECK-NEXT: store i16 [[CONV14]], i16* @glob, align 2
+; CHECK-NEXT: ret void
+;
+ %conv = zext i8 %beth to i32
+ %mul = mul nuw nsw i32 %conv, %conv
+ %conv3 = and i32 %mul, 255
+ %tobool8 = icmp ne i32 %mul, %conv3
+ br i1 %tobool8, label %if.then9, label %if.then9
+
+if.then9:
+ %tinky = load i16, i16* @glob
+ %conv13 = sext i16 %tinky to i32
+ %and = and i32 %mul, %conv13
+ %conv14 = trunc i32 %and to i16
+ store i16 %conv14, i16* @glob
+ ret void
+}
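Two details of the regenerated pr33765 checks are easy to misread. The branch becomes 'br i1 undef' because both successors of the original branch are %if.then9; with identical targets the condition is dead, so it may legally be replaced by undef:

    br i1 %tobool8, label %if.then9, label %if.then9   ; targets identical, condition unused

And the sext of %tinky turns into a zext because the 'and' result is immediately truncated back to i16, so the extended high bits are never demanded (a demanded-bits reading of the output, not quoted from the PR).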
diff --git a/test/Transforms/JumpThreading/select.ll b/test/Transforms/JumpThreading/select.ll
index 6a3cf7edd7dcd..5e84ec54971a0 100644
--- a/test/Transforms/JumpThreading/select.ll
+++ b/test/Transforms/JumpThreading/select.ll
@@ -280,10 +280,85 @@ cond.false.15.i: ; preds = %cond.false.10.i
ret i32 %j.add3
; CHECK-LABEL: @unfold3
-; CHECK: br i1 %cmp.i, label %.exit.thread2, label %cond.false.i
+; CHECK: br i1 %cmp.i, label %.exit.thread2, label %cond.false.i
; CHECK: br i1 %cmp4.i, label %.exit.thread, label %cond.false.6.i
; CHECK: br i1 %cmp8.i, label %.exit.thread2, label %cond.false.10.i
; CHECK: br i1 %cmp13.i, label %.exit.thread, label %.exit
; CHECK: br i1 %phitmp, label %.exit.thread, label %.exit.thread2
; CHECK: br label %.exit.thread2
}
+
+define i32 @unfold4(i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z, i32 %j) nounwind {
+entry:
+ %add3 = add nsw i32 %j, 2
+ %cmp.i = icmp slt i32 %u, %v
+ br i1 %cmp.i, label %.exit, label %cond.false.i
+
+cond.false.i: ; preds = %entry
+ %cmp4.i = icmp sgt i32 %u, %v
+ br i1 %cmp4.i, label %.exit, label %cond.false.6.i
+
+cond.false.6.i: ; preds = %cond.false.i
+ %cmp8.i = icmp slt i32 %w, %x
+ br i1 %cmp8.i, label %.exit, label %cond.false.10.i
+
+cond.false.10.i: ; preds = %cond.false.6.i
+ %cmp13.i = icmp sgt i32 %w, %x
+ br i1 %cmp13.i, label %.exit, label %cond.false.15.i
+
+cond.false.15.i: ; preds = %cond.false.10.i
+ %cmp19.i = icmp sge i32 %y, %z
+ %conv = zext i1 %cmp19.i to i32
+ br label %.exit
+
+.exit: ; preds = %entry, %cond.false.i, %cond.false.6.i, %cond.false.10.i, %cond.false.15.i
+ %cond23.i = phi i32 [ 1, %entry ], [ 0, %cond.false.i ], [ 1, %cond.false.6.i ], [ %conv, %cond.false.15.i ], [ 0, %cond.false.10.i ]
+ %lnot.i18 = icmp eq i32 %cond23.i, 1
+ %j.add3 = select i1 %lnot.i18, i32 %j, i32 %add3
+ ret i32 %j.add3
+
+; CHECK-LABEL: @unfold4
+; CHECK: br i1 %cmp.i, label %.exit.thread, label %cond.false.i
+; CHECK: br i1 %cmp4.i, label %.exit.thread3, label %cond.false.6.i
+; CHECK: br i1 %cmp8.i, label %.exit.thread, label %cond.false.10.i
+; CHECK: br i1 %cmp13.i, label %.exit.thread3, label %.exit
+; CHECK: br i1 %lnot.i18, label %.exit.thread, label %.exit.thread3
+; CHECK: br label %.exit.thread3
+}
+
+define i32 @unfold5(i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z, i32 %j) nounwind {
+entry:
+ %add3 = add nsw i32 %j, 2
+ %cmp.i = icmp slt i32 %u, %v
+ br i1 %cmp.i, label %.exit, label %cond.false.i
+
+cond.false.i: ; preds = %entry
+ %cmp4.i = icmp sgt i32 %u, %v
+ br i1 %cmp4.i, label %.exit, label %cond.false.6.i
+
+cond.false.6.i: ; preds = %cond.false.i
+ %cmp8.i = icmp slt i32 %w, %x
+ br i1 %cmp8.i, label %.exit, label %cond.false.10.i
+
+cond.false.10.i: ; preds = %cond.false.6.i
+ %cmp13.i = icmp sgt i32 %w, %x
+ br i1 %cmp13.i, label %.exit, label %cond.false.15.i
+
+cond.false.15.i: ; preds = %cond.false.10.i
+ %cmp19.i = icmp sge i32 %y, %z
+ %conv = zext i1 %cmp19.i to i32
+ br label %.exit
+
+.exit: ; preds = %entry, %cond.false.i, %cond.false.6.i, %cond.false.10.i, %cond.false.15.i
+ %cond23.i = phi i32 [ 2, %entry ], [ 3, %cond.false.i ], [ 1, %cond.false.6.i ], [ %conv, %cond.false.15.i ], [ 7, %cond.false.10.i ]
+ %lnot.i18 = icmp sgt i32 %cond23.i, 5
+ %j.add3 = select i1 %lnot.i18, i32 %j, i32 %cond23.i
+ ret i32 %j.add3
+
+; CHECK-LABEL: @unfold5
+; CHECK: br i1 %cmp.i, label %.exit, label %cond.false.i
+; CHECK: br i1 %cmp4.i, label %.exit, label %cond.false.6.i
+; CHECK: br i1 %cmp8.i, label %.exit, label %cond.false.10.i
+; CHECK: br i1 %cmp13.i, label %.exit, label %cond.false.15.i
+; CHECK: br label %.exit
+}
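The unfold tests exercise JumpThreading's select unfolding: when a select ultimately controls a conditional branch through a phi, the pass turns it back into explicit control flow so that constant phi inputs can be threaded straight to their final destination. A minimal sketch of the shape of the transform (illustrative, under the usual reading of the pass):

    ; before: the select hides the control dependence
    %s = select i1 %c, i32 %a, i32 %b
    ; after: an explicit diamond that JumpThreading can thread through
    br i1 %c, label %take.a, label %take.b
    ; ... with a phi of %a/%b at the join block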
diff --git a/test/Transforms/LoopInterchange/current-limitations-lcssa.ll b/test/Transforms/LoopInterchange/current-limitations-lcssa.ll
new file mode 100644
index 0000000000000..df6c6cfdbcb5d
--- /dev/null
+++ b/test/Transforms/LoopInterchange/current-limitations-lcssa.ll
@@ -0,0 +1,76 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We test the complete .ll to check the adjustment of the outer loop header/latch and the inner loop header/latch.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@C = common global [100 x [100 x i32]] zeroinitializer
+
+;; FIXME:
+;; Test for interchange when we have an lcssa phi. This loop nest should ideally be interchanged, but that is currently not supported.
+;; for(gi=1;gi<N;gi++)
+;; for(gj=1;gj<M;gj++)
+;; A[gj][gi] = A[gj - 1][gi] + C[gj][gi];
+
+@gi = common global i32 0
+@gj = common global i32 0
+
+define void @interchange_07(i32 %N, i32 %M){
+entry:
+ store i32 1, i32* @gi
+ %cmp21 = icmp sgt i32 %N, 1
+ br i1 %cmp21, label %for.cond1.preheader.lr.ph, label %for.end16
+
+for.cond1.preheader.lr.ph:
+ %cmp218 = icmp sgt i32 %M, 1
+ %gi.promoted = load i32, i32* @gi
+ %0 = add i32 %M, -1
+ %1 = sext i32 %gi.promoted to i64
+ %2 = sext i32 %N to i64
+ %3 = add i32 %gi.promoted, 1
+ %4 = icmp slt i32 %3, %N
+ %smax = select i1 %4, i32 %N, i32 %3
+ br label %for.cond1.preheader
+
+for.cond1.preheader:
+ %indvars.iv25 = phi i64 [ %1, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next26, %for.inc14 ]
+ br i1 %cmp218, label %for.body3, label %for.inc14
+
+for.body3:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 1, %for.cond1.preheader ]
+ %5 = add nsw i64 %indvars.iv, -1
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %5, i64 %indvars.iv25
+ %6 = load i32, i32* %arrayidx5
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %indvars.iv, i64 %indvars.iv25
+ %7 = load i32, i32* %arrayidx9
+ %add = add nsw i32 %7, %6
+ %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv25
+ store i32 %add, i32* %arrayidx13
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.inc14, label %for.body3
+
+for.inc14:
+ %inc.lcssa23 = phi i32 [ 1, %for.cond1.preheader ], [ %M, %for.body3 ]
+ %indvars.iv.next26 = add nsw i64 %indvars.iv25, 1
+ %cmp = icmp slt i64 %indvars.iv.next26, %2
+ br i1 %cmp, label %for.cond1.preheader, label %for.cond.for.end16_crit_edge
+
+for.cond.for.end16_crit_edge:
+ store i32 %inc.lcssa23, i32* @gj
+ store i32 %smax, i32* @gi
+ br label %for.end16
+
+for.end16:
+ ret void
+}
+
+; CHECK-LABEL: @interchange_07
+; CHECK: for.body3: ; preds = %for.body3.preheader, %for.body3
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 1, %for.body3.preheader ]
+; CHECK: %5 = add nsw i64 %indvars.iv, -1
+; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %5, i64 %indvars.iv25
+; CHECK: %6 = load i32, i32* %arrayidx5
+; CHECK: %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %indvars.iv, i64 %indvars.iv25
diff --git a/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll b/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll
new file mode 100644
index 0000000000000..c3b0b9291424b
--- /dev/null
+++ b/test/Transforms/LoopInterchange/interchange-flow-dep-outer.ll
@@ -0,0 +1,118 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We test the complete .ll to check the adjustment of the outer loop header/latch and the inner loop header/latch.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x i32] zeroinitializer
+@C = common global [100 x [100 x i32]] zeroinitializer
+@D = common global [100 x [100 x [100 x i32]]] zeroinitializer
+
+;; Test that a flow dependency in the outer loop doesn't prevent interchange of
+;; the i and j loops.
+;;
+;; for (int k = 0; k < 100; ++k) {
+;; T[k] = fn1();
+;; for (int i = 0; i < 1000; ++i)
+;; for(int j = 1; j < 1000; ++j)
+;; Arr[j][i] = Arr[j][i]+k;
+;; fn2(T[k]);
+;; }
+
+@T = internal global [100 x double] zeroinitializer, align 4
+@Arr = internal global [1000 x [1000 x i32]] zeroinitializer, align 4
+
+define void @interchange_09(i32 %k) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.cond.cleanup4
+ ret void
+
+for.body: ; preds = %for.cond.cleanup4, %entry
+ %indvars.iv45 = phi i64 [ 0, %entry ], [ %indvars.iv.next46, %for.cond.cleanup4 ]
+ %call = call double @fn1()
+ %arrayidx = getelementptr inbounds [100 x double], [100 x double]* @T, i64 0, i64 %indvars.iv45
+ store double %call, double* %arrayidx, align 8
+ br label %for.cond6.preheader
+
+for.cond6.preheader: ; preds = %for.cond.cleanup8, %for.body
+ %indvars.iv42 = phi i64 [ 0, %for.body ], [ %indvars.iv.next43, %for.cond.cleanup8 ]
+ br label %for.body9
+
+for.cond.cleanup4: ; preds = %for.cond.cleanup8
+ %tmp = load double, double* %arrayidx, align 8
+ call void @fn2(double %tmp)
+ %indvars.iv.next46 = add nuw nsw i64 %indvars.iv45, 1
+ %exitcond47 = icmp ne i64 %indvars.iv.next46, 100
+ br i1 %exitcond47, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup8: ; preds = %for.body9
+ %indvars.iv.next43 = add nuw nsw i64 %indvars.iv42, 1
+ %exitcond44 = icmp ne i64 %indvars.iv.next43, 1000
+ br i1 %exitcond44, label %for.cond6.preheader, label %for.cond.cleanup4
+
+for.body9: ; preds = %for.body9, %for.cond6.preheader
+ %indvars.iv = phi i64 [ 1, %for.cond6.preheader ], [ %indvars.iv.next, %for.body9 ]
+ %arrayidx13 = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv42
+ %tmp1 = load i32, i32* %arrayidx13, align 4
+ %tmp2 = trunc i64 %indvars.iv45 to i32
+ %add = add nsw i32 %tmp1, %tmp2
+ store i32 %add, i32* %arrayidx13, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp ne i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.body9, label %for.cond.cleanup8
+}
+
+declare double @fn1()
+declare void @fn2(double)
+
+
+;; After interchange, %indvars.iv (j) should be the induction variable of the middle loop.
+;; After interchange, %indvars.iv42 (i) should be the induction variable of the innermost loop.
+
+; CHECK-LABEL: @interchange_09
+
+; CHECK: for.body:
+; CHECK: %indvars.iv45 = phi i64 [ %indvars.iv.next46, %for.cond.cleanup4 ], [ 0, %for.body.preheader ]
+; CHECK: %call = call double @fn1()
+; CHECK: %arrayidx = getelementptr inbounds [100 x double], [100 x double]* @T, i64 0, i64 %indvars.iv45
+; CHECK: store double %call, double* %arrayidx, align 8
+; CHECK: br label %for.body9.preheader
+
+; CHECK: for.cond6.preheader.preheader:
+; CHECK: br label %for.cond6.preheader
+
+; CHECK: for.cond6.preheader:
+; CHECK: %indvars.iv42 = phi i64 [ %indvars.iv.next43, %for.cond.cleanup8 ], [ 0, %for.cond6.preheader.preheader ]
+; CHECK: br label %for.body9.split1
+
+; CHECK: for.body9.preheader:
+; CHECK: br label %for.body9
+
+; CHECK: for.cond.cleanup4:
+; CHECK: %tmp = load double, double* %arrayidx, align 8
+; CHECK: call void @fn2(double %tmp)
+; CHECK: %indvars.iv.next46 = add nuw nsw i64 %indvars.iv45, 1
+; CHECK: %exitcond47 = icmp ne i64 %indvars.iv.next46, 100
+; CHECK: br i1 %exitcond47, label %for.body, label %for.cond.cleanup
+
+; CHECK: for.cond.cleanup8:
+; CHECK: %indvars.iv.next43 = add nuw nsw i64 %indvars.iv42, 1
+; CHECK: %exitcond44 = icmp ne i64 %indvars.iv.next43, 1000
+; CHECK: br i1 %exitcond44, label %for.cond6.preheader, label %for.body9.split
+
+; CHECK: for.body9:
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body9.split ], [ 1, %for.body9.preheader ]
+; CHECK: br label %for.cond6.preheader.preheader
+
+; CHECK: for.body9.split1:
+; CHECK: %arrayidx13 = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv42
+; CHECK: store i32 %add, i32* %arrayidx13, align 4
+; CHECK: br label %for.cond.cleanup8
+
+; CHECK: for.body9.split:
+; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+; CHECK: %exitcond = icmp ne i64 %indvars.iv.next, 1000
+; CHECK: br i1 %exitcond, label %for.body9, label %for.cond.cleanup4
diff --git a/test/Transforms/LoopInterchange/interchange-not-profitable.ll b/test/Transforms/LoopInterchange/interchange-not-profitable.ll
new file mode 100644
index 0000000000000..67a63cab08bd1
--- /dev/null
+++ b/test/Transforms/LoopInterchange/interchange-not-profitable.ll
@@ -0,0 +1,66 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We test the complete .ll to check the adjustment of the outer loop header/latch and the inner loop header/latch.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x i32] zeroinitializer
+
+;; The loops should not be interchanged in this case, since interchange is not profitable.
+;; for(int i=0;i<100;i++)
+;; for(int j=0;j<100;j++)
+;; A[i][j] = A[i][j]+k;
+
+define void @interchange_03(i32 %k) {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader:
+ %indvars.iv21 = phi i64 [ 0, %entry ], [ %indvars.iv.next22, %for.inc10 ]
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv21, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx5
+ %add = add nsw i32 %0, %k
+ store i32 %add, i32* %arrayidx5
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 100
+ br i1 %exitcond, label %for.inc10, label %for.body3
+
+for.inc10:
+ %indvars.iv.next22 = add nuw nsw i64 %indvars.iv21, 1
+ %exitcond23 = icmp eq i64 %indvars.iv.next22, 100
+ br i1 %exitcond23, label %for.end12, label %for.cond1.preheader
+
+for.end12:
+ ret void
+}
+
+; CHECK-LABEL: @interchange_03
+; CHECK: entry:
+; CHECK: br label %for.cond1.preheader.preheader
+; CHECK: for.cond1.preheader.preheader: ; preds = %entry
+; CHECK: br label %for.cond1.preheader
+; CHECK: for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc10
+; CHECK: %indvars.iv21 = phi i64 [ %indvars.iv.next22, %for.inc10 ], [ 0, %for.cond1.preheader.preheader ]
+; CHECK: br label %for.body3.preheader
+; CHECK: for.body3.preheader: ; preds = %for.cond1.preheader
+; CHECK: br label %for.body3
+; CHECK: for.body3: ; preds = %for.body3.preheader, %for.body3
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 0, %for.body3.preheader ]
+; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv21, i64 %indvars.iv
+; CHECK: %0 = load i32, i32* %arrayidx5
+; CHECK: %add = add nsw i32 %0, %k
+; CHECK: store i32 %add, i32* %arrayidx5
+; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+; CHECK: %exitcond = icmp eq i64 %indvars.iv.next, 100
+; CHECK: br i1 %exitcond, label %for.inc10, label %for.body3
+; CHECK: for.inc10: ; preds = %for.body3
+; CHECK: %indvars.iv.next22 = add nuw nsw i64 %indvars.iv21, 1
+; CHECK: %exitcond23 = icmp eq i64 %indvars.iv.next22, 100
+; CHECK: br i1 %exitcond23, label %for.end12, label %for.cond1.preheader
+; CHECK: for.end12: ; preds = %for.inc10
+; CHECK: ret void
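The profitability verdict here is about locality: with the j-loop innermost, A[i][j] walks the row-major array with a 4-byte stride, while the interchanged order would step a full row per inner iteration, 100 * 4 = 400 bytes, touching a new cache line every time (a worked estimate assuming 64-byte lines; the pass's exact cost model may differ). Hence the CHECK lines verify the loop structure is left intact apart from the preheader splits.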
diff --git a/test/Transforms/LoopInterchange/interchange-output-dependencies.ll b/test/Transforms/LoopInterchange/interchange-output-dependencies.ll
new file mode 100644
index 0000000000000..98deba96f8c6f
--- /dev/null
+++ b/test/Transforms/LoopInterchange/interchange-output-dependencies.ll
@@ -0,0 +1,86 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We test the complete .ll to check the adjustment of the outer loop header/latch and the inner loop header/latch.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+
+;; Test to make sure we can handle output dependencies.
+;;
+;; for (int i = 0; i < 2; ++i)
+;; for(int j = 0; j < 3; ++j) {
+;; A[j][i] = i;
+;; A[j][i+1] = j;
+;; }
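+;; Iteration i stores to A[j][i+1] and iteration i+1 stores to the same
+;; element as A[j][i], an output dependence with direction vector (<, =).
+;; Interchange turns this into (=, <), which is still lexicographically
+;; positive, so the transform is legal.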
+
+@A10 = local_unnamed_addr global [3 x [3 x i32]] zeroinitializer, align 16
+
+define void @interchange_10() {
+entry:
+ br label %for.cond1.preheader
+
+for.cond.loopexit: ; preds = %for.body4
+ %exitcond28 = icmp ne i64 %indvars.iv.next27, 2
+ br i1 %exitcond28, label %for.cond1.preheader, label %for.cond.cleanup
+
+for.cond1.preheader: ; preds = %for.cond.loopexit, %entry
+ %indvars.iv26 = phi i64 [ 0, %entry ], [ %indvars.iv.next27, %for.cond.loopexit ]
+ %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
+ br label %for.body4
+
+for.cond.cleanup: ; preds = %for.cond.loopexit
+ ret void
+
+for.body4: ; preds = %for.body4, %for.cond1.preheader
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body4 ]
+ %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
+ %tmp = trunc i64 %indvars.iv26 to i32
+ store i32 %tmp, i32* %arrayidx6, align 4
+ %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
+ %tmp1 = trunc i64 %indvars.iv to i32
+ store i32 %tmp1, i32* %arrayidx10, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp ne i64 %indvars.iv.next, 3
+ br i1 %exitcond, label %for.body4, label %for.cond.loopexit
+}
+
+; CHECK-LABEL: @interchange_10
+; CHECK: entry:
+; CHECK: br label %for.body4.preheader
+
+; CHECK: for.cond1.preheader.preheader:
+; CHECK: br label %for.cond1.preheader
+
+; CHECK: for.cond.loopexit:
+; CHECK: %exitcond28 = icmp ne i64 %indvars.iv.next27, 2
+; CHECK: br i1 %exitcond28, label %for.cond1.preheader, label %for.body4.split
+
+; CHECK: for.cond1.preheader:
+; CHECK: %indvars.iv26 = phi i64 [ %indvars.iv.next27, %for.cond.loopexit ], [ 0, %for.cond1.preheader.preheader ]
+; CHECK: %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
+; CHECK: br label %for.body4.split1
+
+; CHECK: for.body4.preheader:
+; CHECK: br label %for.body4
+
+; CHECK: for.cond.cleanup:
+; CHECK: ret void
+
+; CHECK: for.body4:
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body4.split ], [ 0, %for.body4.preheader ]
+; CHECK: br label %for.cond1.preheader.preheader
+
+; CHECK: for.body4.split1:
+; CHECK: %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
+; CHECK: %tmp = trunc i64 %indvars.iv26 to i32
+; CHECK: store i32 %tmp, i32* %arrayidx6, align 4
+; CHECK: %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
+; CHECK: %tmp1 = trunc i64 %indvars.iv to i32
+; CHECK: store i32 %tmp1, i32* %arrayidx10, align 4
+; CHECK: br label %for.cond.loopexit
+
+; CHECK: for.body4.split:
+; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+; CHECK: %exitcond = icmp ne i64 %indvars.iv.next, 3
+; CHECK: br i1 %exitcond, label %for.body4, label %for.cond.cleanup
diff --git a/test/Transforms/LoopInterchange/interchange-simple-count-down.ll b/test/Transforms/LoopInterchange/interchange-simple-count-down.ll
new file mode 100644
index 0000000000000..70ba5940257f5
--- /dev/null
+++ b/test/Transforms/LoopInterchange/interchange-simple-count-down.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We check the complete .ll output for adjustments to the outer and inner loop headers and latches.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x i32] zeroinitializer
+
+;; for(int i=0;i<100;i++)
+;; for(int j=100;j>=0;j--)
+;; A[j][i] = A[j][i]+k;
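+;; A[j][i] is strided in the inner j loop (j indexes the row dimension), so
+;; interchange makes i innermost and the access stride-1; the downward count
+;; of the inner induction variable does not affect legality.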
+
+define void @interchange_02(i32 %k) {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader:
+ %indvars.iv19 = phi i64 [ 0, %entry ], [ %indvars.iv.next20, %for.inc10 ]
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 100, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv19
+ %0 = load i32, i32* %arrayidx5
+ %add = add nsw i32 %0, %k
+ store i32 %add, i32* %arrayidx5
+ %indvars.iv.next = add nsw i64 %indvars.iv, -1
+ %cmp2 = icmp sgt i64 %indvars.iv, 0
+ br i1 %cmp2, label %for.body3, label %for.inc10
+
+for.inc10:
+ %indvars.iv.next20 = add nuw nsw i64 %indvars.iv19, 1
+ %exitcond = icmp eq i64 %indvars.iv.next20, 100
+ br i1 %exitcond, label %for.end11, label %for.cond1.preheader
+
+for.end11:
+ ret void
+}
+
+; CHECK-LABEL: @interchange_02
+; CHECK: entry:
+; CHECK: br label %for.body3.preheader
+; CHECK: for.cond1.preheader.preheader:
+; CHECK: br label %for.cond1.preheader
+; CHECK: for.cond1.preheader:
+; CHECK: %indvars.iv19 = phi i64 [ %indvars.iv.next20, %for.inc10 ], [ 0, %for.cond1.preheader.preheader ]
+; CHECK: br label %for.body3.split1
+; CHECK: for.body3.preheader:
+; CHECK: br label %for.body3
+; CHECK: for.body3:
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3.split ], [ 100, %for.body3.preheader ]
+; CHECK: br label %for.cond1.preheader.preheader
+; CHECK: for.body3.split1: ; preds = %for.cond1.preheader
+; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv19
+; CHECK: %0 = load i32, i32* %arrayidx5
+; CHECK: %add = add nsw i32 %0, %k
+; CHECK: store i32 %add, i32* %arrayidx5
+; CHECK: br label %for.inc10
+; CHECK: for.body3.split:
+; CHECK: %indvars.iv.next = add nsw i64 %indvars.iv, -1
+; CHECK: %cmp2 = icmp sgt i64 %indvars.iv, 0
+; CHECK: br i1 %cmp2, label %for.body3, label %for.end11
+; CHECK: for.inc10:
+; CHECK: %indvars.iv.next20 = add nuw nsw i64 %indvars.iv19, 1
+; CHECK: %exitcond = icmp eq i64 %indvars.iv.next20, 100
+; CHECK: br i1 %exitcond, label %for.body3.split, label %for.cond1.preheader
+; CHECK: for.end11:
+; CHECK: ret void
diff --git a/test/Transforms/LoopInterchange/interchange-simple-count-up.ll b/test/Transforms/LoopInterchange/interchange-simple-count-up.ll
new file mode 100644
index 0000000000000..4febe0269810d
--- /dev/null
+++ b/test/Transforms/LoopInterchange/interchange-simple-count-up.ll
@@ -0,0 +1,86 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We check the complete .ll output for adjustments to the outer and inner loop headers and latches.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x i32] zeroinitializer
+
+;; for(int i=0;i<N;i++)
+;; for(int j=1;j<N;j++)
+;; A[j][i] = A[j][i]+k;
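+;; The inner j loop walks the row dimension of A, so interchanging the loops
+;; turns A[j][i] into a stride-1 access in the new inner i loop.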
+
+define void @interchange_01(i32 %k, i32 %N) {
+entry:
+ %cmp21 = icmp sgt i32 %N, 0
+ br i1 %cmp21, label %for.cond1.preheader.lr.ph, label %for.end12
+
+for.cond1.preheader.lr.ph:
+ %cmp219 = icmp sgt i32 %N, 1
+ %0 = add i32 %N, -1
+ br label %for.cond1.preheader
+
+for.cond1.preheader:
+ %indvars.iv23 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next24, %for.inc10 ]
+ br i1 %cmp219, label %for.body3, label %for.inc10
+
+for.body3:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 1, %for.cond1.preheader ]
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+ %1 = load i32, i32* %arrayidx5
+ %add = add nsw i32 %1, %k
+ store i32 %add, i32* %arrayidx5
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.inc10, label %for.body3
+
+for.inc10:
+ %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
+ %lftr.wideiv25 = trunc i64 %indvars.iv23 to i32
+ %exitcond26 = icmp eq i32 %lftr.wideiv25, %0
+ br i1 %exitcond26, label %for.end12, label %for.cond1.preheader
+
+for.end12:
+ ret void
+}
+
+; CHECK-LABEL: @interchange_01
+; CHECK: entry:
+; CHECK: %cmp21 = icmp sgt i32 %N, 0
+; CHECK: br i1 %cmp21, label %for.body3.preheader, label %for.end12
+; CHECK: for.cond1.preheader.lr.ph:
+; CHECK: br label %for.cond1.preheader
+; CHECK: for.cond1.preheader:
+; CHECK: %indvars.iv23 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next24, %for.inc10 ]
+; CHECK: br i1 %cmp219, label %for.body3.split1, label %for.end12.loopexit
+; CHECK: for.body3.preheader:
+; CHECK: %cmp219 = icmp sgt i32 %N, 1
+; CHECK: %0 = add i32 %N, -1
+; CHECK: br label %for.body3
+; CHECK: for.body3:
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3.split ], [ 1, %for.body3.preheader ]
+; CHECK: br label %for.cond1.preheader.lr.ph
+; CHECK: for.body3.split1:
+; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
+; CHECK: %1 = load i32, i32* %arrayidx5
+; CHECK: %add = add nsw i32 %1, %k
+; CHECK: store i32 %add, i32* %arrayidx5
+; CHECK: br label %for.inc10.loopexit
+; CHECK: for.body3.split:
+; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+; CHECK: %lftr.wideiv = trunc i64 %indvars.iv to i32
+; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, %0
+; CHECK: br i1 %exitcond, label %for.end12.loopexit, label %for.body3
+; CHECK: for.inc10.loopexit:
+; CHECK: br label %for.inc10
+; CHECK: for.inc10:
+; CHECK: %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
+; CHECK: %lftr.wideiv25 = trunc i64 %indvars.iv23 to i32
+; CHECK: %exitcond26 = icmp eq i32 %lftr.wideiv25, %0
+; CHECK: br i1 %exitcond26, label %for.body3.split, label %for.cond1.preheader
+; CHECK: for.end12.loopexit:
+; CHECK: br label %for.end12
+; CHECK: for.end12:
+; CHECK: ret void
diff --git a/test/Transforms/LoopInterchange/interchange.ll b/test/Transforms/LoopInterchange/interchange.ll
deleted file mode 100644
index 77b33e43bedc7..0000000000000
--- a/test/Transforms/LoopInterchange/interchange.ll
+++ /dev/null
@@ -1,749 +0,0 @@
-; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
-;; We test the complete .ll for adjustment in outer loop header/latch and inner loop header/latch.
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-@A = common global [100 x [100 x i32]] zeroinitializer
-@B = common global [100 x i32] zeroinitializer
-@C = common global [100 x [100 x i32]] zeroinitializer
-@D = common global [100 x [100 x [100 x i32]]] zeroinitializer
-
-declare void @foo(...)
-
-;;--------------------------------------Test case 01------------------------------------
-;; for(int i=0;i<N;i++)
-;; for(int j=1;j<N;j++)
-;; A[j][i] = A[j][i]+k;
-
-define void @interchange_01(i32 %k, i32 %N) {
-entry:
- %cmp21 = icmp sgt i32 %N, 0
- br i1 %cmp21, label %for.cond1.preheader.lr.ph, label %for.end12
-
-for.cond1.preheader.lr.ph:
- %cmp219 = icmp sgt i32 %N, 1
- %0 = add i32 %N, -1
- br label %for.cond1.preheader
-
-for.cond1.preheader:
- %indvars.iv23 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next24, %for.inc10 ]
- br i1 %cmp219, label %for.body3, label %for.inc10
-
-for.body3:
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 1, %for.cond1.preheader ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
- %1 = load i32, i32* %arrayidx5
- %add = add nsw i32 %1, %k
- store i32 %add, i32* %arrayidx5
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %0
- br i1 %exitcond, label %for.inc10, label %for.body3
-
-for.inc10:
- %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
- %lftr.wideiv25 = trunc i64 %indvars.iv23 to i32
- %exitcond26 = icmp eq i32 %lftr.wideiv25, %0
- br i1 %exitcond26, label %for.end12, label %for.cond1.preheader
-
-for.end12:
- ret void
-}
-
-; CHECK-LABEL: @interchange_01
-; CHECK: entry:
-; CHECK: %cmp21 = icmp sgt i32 %N, 0
-; CHECK: br i1 %cmp21, label %for.body3.preheader, label %for.end12
-; CHECK: for.cond1.preheader.lr.ph:
-; CHECK: br label %for.cond1.preheader
-; CHECK: for.cond1.preheader:
-; CHECK: %indvars.iv23 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next24, %for.inc10 ]
-; CHECK: br i1 %cmp219, label %for.body3.split1, label %for.end12.loopexit
-; CHECK: for.body3.preheader:
-; CHECK: %cmp219 = icmp sgt i32 %N, 1
-; CHECK: %0 = add i32 %N, -1
-; CHECK: br label %for.body3
-; CHECK: for.body3:
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3.split ], [ 1, %for.body3.preheader ]
-; CHECK: br label %for.cond1.preheader.lr.ph
-; CHECK: for.body3.split1:
-; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv23
-; CHECK: %1 = load i32, i32* %arrayidx5
-; CHECK: %add = add nsw i32 %1, %k
-; CHECK: store i32 %add, i32* %arrayidx5
-; CHECK: br label %for.inc10.loopexit
-; CHECK: for.body3.split:
-; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %lftr.wideiv = trunc i64 %indvars.iv to i32
-; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, %0
-; CHECK: br i1 %exitcond, label %for.end12.loopexit, label %for.body3
-; CHECK: for.inc10.loopexit:
-; CHECK: br label %for.inc10
-; CHECK: for.inc10:
-; CHECK: %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
-; CHECK: %lftr.wideiv25 = trunc i64 %indvars.iv23 to i32
-; CHECK: %exitcond26 = icmp eq i32 %lftr.wideiv25, %0
-; CHECK: br i1 %exitcond26, label %for.body3.split, label %for.cond1.preheader
-; CHECK: for.end12.loopexit:
-; CHECK: br label %for.end12
-; CHECK: for.end12:
-; CHECK: ret void
-
-;;--------------------------------------Test case 02-------------------------------------
-
-;; for(int i=0;i<100;i++)
-;; for(int j=100;j>=0;j--)
-;; A[j][i] = A[j][i]+k;
-
-define void @interchange_02(i32 %k) {
-entry:
- br label %for.cond1.preheader
-
-for.cond1.preheader:
- %indvars.iv19 = phi i64 [ 0, %entry ], [ %indvars.iv.next20, %for.inc10 ]
- br label %for.body3
-
-for.body3:
- %indvars.iv = phi i64 [ 100, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv19
- %0 = load i32, i32* %arrayidx5
- %add = add nsw i32 %0, %k
- store i32 %add, i32* %arrayidx5
- %indvars.iv.next = add nsw i64 %indvars.iv, -1
- %cmp2 = icmp sgt i64 %indvars.iv, 0
- br i1 %cmp2, label %for.body3, label %for.inc10
-
-for.inc10:
- %indvars.iv.next20 = add nuw nsw i64 %indvars.iv19, 1
- %exitcond = icmp eq i64 %indvars.iv.next20, 100
- br i1 %exitcond, label %for.end11, label %for.cond1.preheader
-
-for.end11:
- ret void
-}
-
-; CHECK-LABEL: @interchange_02
-; CHECK: entry:
-; CHECK: br label %for.body3.preheader
-; CHECK: for.cond1.preheader.preheader:
-; CHECK: br label %for.cond1.preheader
-; CHECK: for.cond1.preheader:
-; CHECK: %indvars.iv19 = phi i64 [ %indvars.iv.next20, %for.inc10 ], [ 0, %for.cond1.preheader.preheader ]
-; CHECK: br label %for.body3.split1
-; CHECK: for.body3.preheader:
-; CHECK: br label %for.body3
-; CHECK: for.body3:
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3.split ], [ 100, %for.body3.preheader ]
-; CHECK: br label %for.cond1.preheader.preheader
-; CHECK: for.body3.split1: ; preds = %for.cond1.preheader
-; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv19
-; CHECK: %0 = load i32, i32* %arrayidx5
-; CHECK: %add = add nsw i32 %0, %k
-; CHECK: store i32 %add, i32* %arrayidx5
-; CHECK: br label %for.inc10
-; CHECK: for.body3.split:
-; CHECK: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK: %cmp2 = icmp sgt i64 %indvars.iv, 0
-; CHECK: br i1 %cmp2, label %for.body3, label %for.end11
-; CHECK: for.inc10:
-; CHECK: %indvars.iv.next20 = add nuw nsw i64 %indvars.iv19, 1
-; CHECK: %exitcond = icmp eq i64 %indvars.iv.next20, 100
-; CHECK: br i1 %exitcond, label %for.body3.split, label %for.cond1.preheader
-; CHECK: for.end11:
-; CHECK: ret void
-
-;;--------------------------------------Test case 03-------------------------------------
-;; Loops should not be interchanged in this case as it is not profitable.
-;; for(int i=0;i<100;i++)
-;; for(int j=0;j<100;j++)
-;; A[i][j] = A[i][j]+k;
-
-define void @interchange_03(i32 %k) {
-entry:
- br label %for.cond1.preheader
-
-for.cond1.preheader:
- %indvars.iv21 = phi i64 [ 0, %entry ], [ %indvars.iv.next22, %for.inc10 ]
- br label %for.body3
-
-for.body3:
- %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv21, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx5
- %add = add nsw i32 %0, %k
- store i32 %add, i32* %arrayidx5
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond = icmp eq i64 %indvars.iv.next, 100
- br i1 %exitcond, label %for.inc10, label %for.body3
-
-for.inc10:
- %indvars.iv.next22 = add nuw nsw i64 %indvars.iv21, 1
- %exitcond23 = icmp eq i64 %indvars.iv.next22, 100
- br i1 %exitcond23, label %for.end12, label %for.cond1.preheader
-
-for.end12:
- ret void
-}
-
-; CHECK-LABEL: @interchange_03
-; CHECK: entry:
-; CHECK: br label %for.cond1.preheader.preheader
-; CHECK: for.cond1.preheader.preheader: ; preds = %entry
-; CHECK: br label %for.cond1.preheader
-; CHECK: for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc10
-; CHECK: %indvars.iv21 = phi i64 [ %indvars.iv.next22, %for.inc10 ], [ 0, %for.cond1.preheader.preheader ]
-; CHECK: br label %for.body3.preheader
-; CHECK: for.body3.preheader: ; preds = %for.cond1.preheader
-; CHECK: br label %for.body3
-; CHECK: for.body3: ; preds = %for.body3.preheader, %for.body3
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 0, %for.body3.preheader ]
-; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv21, i64 %indvars.iv
-; CHECK: %0 = load i32, i32* %arrayidx5
-; CHECK: %add = add nsw i32 %0, %k
-; CHECK: store i32 %add, i32* %arrayidx5
-; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %exitcond = icmp eq i64 %indvars.iv.next, 100
-; CHECK: br i1 %exitcond, label %for.inc10, label %for.body3
-; CHECK: for.inc10: ; preds = %for.body3
-; CHECK: %indvars.iv.next22 = add nuw nsw i64 %indvars.iv21, 1
-; CHECK: %exitcond23 = icmp eq i64 %indvars.iv.next22, 100
-; CHECK: br i1 %exitcond23, label %for.end12, label %for.cond1.preheader
-; CHECK: for.end12: ; preds = %for.inc10
-; CHECK: ret void
-
-
-;;--------------------------------------Test case 04-------------------------------------
-;; Loops should not be interchanged in this case as it is not legal due to dependency.
-;; for(int j=0;j<99;j++)
-;; for(int i=0;i<99;i++)
-;; A[j][i+1] = A[j+1][i]+k;
-
-define void @interchange_04(i32 %k){
-entry:
- br label %for.cond1.preheader
-
-for.cond1.preheader:
- %indvars.iv23 = phi i64 [ 0, %entry ], [ %indvars.iv.next24, %for.inc12 ]
- %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
- br label %for.body3
-
-for.body3:
- %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next24, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx5
- %add6 = add nsw i32 %0, %k
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv23, i64 %indvars.iv.next
- store i32 %add6, i32* %arrayidx11
- %exitcond = icmp eq i64 %indvars.iv.next, 99
- br i1 %exitcond, label %for.inc12, label %for.body3
-
-for.inc12:
- %exitcond25 = icmp eq i64 %indvars.iv.next24, 99
- br i1 %exitcond25, label %for.end14, label %for.cond1.preheader
-
-for.end14:
- ret void
-}
-
-; CHECK-LABEL: @interchange_04
-; CHECK: entry:
-; CHECK: br label %for.cond1.preheader
-; CHECK: for.cond1.preheader: ; preds = %for.inc12, %entry
-; CHECK: %indvars.iv23 = phi i64 [ 0, %entry ], [ %indvars.iv.next24, %for.inc12 ]
-; CHECK: %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
-; CHECK: br label %for.body3
-; CHECK: for.body3: ; preds = %for.body3, %for.cond1.preheader
-; CHECK: %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
-; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next24, i64 %indvars.iv
-; CHECK: %0 = load i32, i32* %arrayidx5
-; CHECK: %add6 = add nsw i32 %0, %k
-; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv23, i64 %indvars.iv.next
-; CHECK: store i32 %add6, i32* %arrayidx11
-; CHECK: %exitcond = icmp eq i64 %indvars.iv.next, 99
-; CHECK: br i1 %exitcond, label %for.inc12, label %for.body3
-; CHECK: for.inc12: ; preds = %for.body3
-; CHECK: %exitcond25 = icmp eq i64 %indvars.iv.next24, 99
-; CHECK: br i1 %exitcond25, label %for.end14, label %for.cond1.preheader
-; CHECK: for.end14: ; preds = %for.inc12
-; CHECK: ret void
-
-
-
-;;--------------------------------------Test case 05-------------------------------------
-;; Loops not tightly nested are not interchanged
-;; for(int j=0;j<N;j++) {
-;; B[j] = j+k;
-;; for(int i=0;i<N;i++)
-;; A[j][i] = A[j][i]+B[j];
-;; }
-
-define void @interchange_05(i32 %k, i32 %N){
-entry:
- %cmp30 = icmp sgt i32 %N, 0
- br i1 %cmp30, label %for.body.lr.ph, label %for.end17
-
-for.body.lr.ph:
- %0 = add i32 %N, -1
- %1 = zext i32 %k to i64
- br label %for.body
-
-for.body:
- %indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
- %2 = add nsw i64 %indvars.iv32, %1
- %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @B, i64 0, i64 %indvars.iv32
- %3 = trunc i64 %2 to i32
- store i32 %3, i32* %arrayidx
- br label %for.body3
-
-for.body3:
- %indvars.iv = phi i64 [ 0, %for.body ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
- %4 = load i32, i32* %arrayidx7
- %add10 = add nsw i32 %3, %4
- store i32 %add10, i32* %arrayidx7
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %0
- br i1 %exitcond, label %for.inc15, label %for.body3
-
-for.inc15:
- %indvars.iv.next33 = add nuw nsw i64 %indvars.iv32, 1
- %lftr.wideiv35 = trunc i64 %indvars.iv32 to i32
- %exitcond36 = icmp eq i32 %lftr.wideiv35, %0
- br i1 %exitcond36, label %for.end17, label %for.body
-
-for.end17:
- ret void
-}
-
-; CHECK-LABEL: @interchange_05
-; CHECK: entry:
-; CHECK: %cmp30 = icmp sgt i32 %N, 0
-; CHECK: br i1 %cmp30, label %for.body.lr.ph, label %for.end17
-; CHECK: for.body.lr.ph:
-; CHECK: %0 = add i32 %N, -1
-; CHECK: %1 = zext i32 %k to i64
-; CHECK: br label %for.body
-; CHECK: for.body:
-; CHECK: %indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
-; CHECK: %2 = add nsw i64 %indvars.iv32, %1
-; CHECK: %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @B, i64 0, i64 %indvars.iv32
-; CHECK: %3 = trunc i64 %2 to i32
-; CHECK: store i32 %3, i32* %arrayidx
-; CHECK: br label %for.body3.preheader
-; CHECK: for.body3.preheader:
-; CHECK: br label %for.body3
-; CHECK: for.body3:
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 0, %for.body3.preheader ]
-; CHECK: %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
-; CHECK: %4 = load i32, i32* %arrayidx7
-; CHECK: %add10 = add nsw i32 %3, %4
-; CHECK: store i32 %add10, i32* %arrayidx7
-; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %lftr.wideiv = trunc i64 %indvars.iv to i32
-; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, %0
-; CHECK: br i1 %exitcond, label %for.inc15, label %for.body3
-; CHECK: for.inc15:
-; CHECK: %indvars.iv.next33 = add nuw nsw i64 %indvars.iv32, 1
-; CHECK: %lftr.wideiv35 = trunc i64 %indvars.iv32 to i32
-; CHECK: %exitcond36 = icmp eq i32 %lftr.wideiv35, %0
-; CHECK: br i1 %exitcond36, label %for.end17.loopexit, label %for.body
-; CHECK: for.end17.loopexit:
-; CHECK: br label %for.end17
-; CHECK: for.end17:
-; CHECK: ret void
-
-
-;;--------------------------------------Test case 06-------------------------------------
-;; Loops not tightly nested are not interchanged
-;; for(int j=0;j<N;j++) {
-;; foo();
-;; for(int i=2;i<N;i++)
-;; A[j][i] = A[j][i]+k;
-;; }
-
-define void @interchange_06(i32 %k, i32 %N) {
-entry:
- %cmp22 = icmp sgt i32 %N, 0
- br i1 %cmp22, label %for.body.lr.ph, label %for.end12
-
-for.body.lr.ph:
- %0 = add i32 %N, -1
- br label %for.body
-
-for.body:
- %indvars.iv24 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next25, %for.inc10 ]
- tail call void (...) @foo()
- br label %for.body3
-
-for.body3:
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 2, %for.body ]
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv24, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx5
- %add = add nsw i32 %1, %k
- store i32 %add, i32* %arrayidx5
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %0
- br i1 %exitcond, label %for.inc10, label %for.body3
-
-for.inc10:
- %indvars.iv.next25 = add nuw nsw i64 %indvars.iv24, 1
- %lftr.wideiv26 = trunc i64 %indvars.iv24 to i32
- %exitcond27 = icmp eq i32 %lftr.wideiv26, %0
- br i1 %exitcond27, label %for.end12, label %for.body
-
-for.end12:
- ret void
-}
-;; Here we are checking if the inner phi is not split then we have not interchanged.
-; CHECK-LABEL: @interchange_06
-; CHECK: phi i64 [ %indvars.iv.next, %for.body3 ], [ 2, %for.body3.preheader ]
-; CHECK-NEXT: getelementptr
-; CHECK-NEXT: %1 = load
-
-;;--------------------------------------Test case 07-------------------------------------
-;; FIXME:
-;; Test for interchange when we have an lcssa phi. This should ideally be interchanged but it is currently not supported.
-;; for(gi=1;gi<N;gi++)
-;; for(gj=1;gj<M;gj++)
-;; A[gj][gi] = A[gj - 1][gi] + C[gj][gi];
-
-@gi = common global i32 0
-@gj = common global i32 0
-
-define void @interchange_07(i32 %N, i32 %M){
-entry:
- store i32 1, i32* @gi
- %cmp21 = icmp sgt i32 %N, 1
- br i1 %cmp21, label %for.cond1.preheader.lr.ph, label %for.end16
-
-for.cond1.preheader.lr.ph:
- %cmp218 = icmp sgt i32 %M, 1
- %gi.promoted = load i32, i32* @gi
- %0 = add i32 %M, -1
- %1 = sext i32 %gi.promoted to i64
- %2 = sext i32 %N to i64
- %3 = add i32 %gi.promoted, 1
- %4 = icmp slt i32 %3, %N
- %smax = select i1 %4, i32 %N, i32 %3
- br label %for.cond1.preheader
-
-for.cond1.preheader:
- %indvars.iv25 = phi i64 [ %1, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next26, %for.inc14 ]
- br i1 %cmp218, label %for.body3, label %for.inc14
-
-for.body3:
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 1, %for.cond1.preheader ]
- %5 = add nsw i64 %indvars.iv, -1
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %5, i64 %indvars.iv25
- %6 = load i32, i32* %arrayidx5
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %indvars.iv, i64 %indvars.iv25
- %7 = load i32, i32* %arrayidx9
- %add = add nsw i32 %7, %6
- %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv, i64 %indvars.iv25
- store i32 %add, i32* %arrayidx13
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %0
- br i1 %exitcond, label %for.inc14, label %for.body3
-
-for.inc14:
- %inc.lcssa23 = phi i32 [ 1, %for.cond1.preheader ], [ %M, %for.body3 ]
- %indvars.iv.next26 = add nsw i64 %indvars.iv25, 1
- %cmp = icmp slt i64 %indvars.iv.next26, %2
- br i1 %cmp, label %for.cond1.preheader, label %for.cond.for.end16_crit_edge
-
-for.cond.for.end16_crit_edge:
- store i32 %inc.lcssa23, i32* @gj
- store i32 %smax, i32* @gi
- br label %for.end16
-
-for.end16:
- ret void
-}
-
-; CHECK-LABEL: @interchange_07
-; CHECK: for.body3: ; preds = %for.body3.preheader, %for.body3
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 1, %for.body3.preheader ]
-; CHECK: %5 = add nsw i64 %indvars.iv, -1
-; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %5, i64 %indvars.iv25
-; CHECK: %6 = load i32, i32* %arrayidx5
-; CHECK: %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @C, i64 0, i64 %indvars.iv, i64 %indvars.iv25
-
-;;------------------------------------------------Test case 08-------------------------------
-;; Test for interchange in loop nest greater than 2.
-;; for(int i=0;i<100;i++)
-;; for(int j=0;j<100;j++)
-;; for(int k=0;k<100;k++)
-;; D[i][k][j] = D[i][k][j]+t;
-
-define void @interchange_08(i32 %t){
-entry:
- br label %for.cond1.preheader
-
-for.cond1.preheader: ; preds = %for.inc15, %entry
- %i.028 = phi i32 [ 0, %entry ], [ %inc16, %for.inc15 ]
- br label %for.cond4.preheader
-
-for.cond4.preheader: ; preds = %for.inc12, %for.cond1.preheader
- %j.027 = phi i32 [ 0, %for.cond1.preheader ], [ %inc13, %for.inc12 ]
- br label %for.body6
-
-for.body6: ; preds = %for.body6, %for.cond4.preheader
- %k.026 = phi i32 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ]
- %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* @D, i32 0, i32 %i.028, i32 %k.026, i32 %j.027
- %0 = load i32, i32* %arrayidx8
- %add = add nsw i32 %0, %t
- store i32 %add, i32* %arrayidx8
- %inc = add nuw nsw i32 %k.026, 1
- %exitcond = icmp eq i32 %inc, 100
- br i1 %exitcond, label %for.inc12, label %for.body6
-
-for.inc12: ; preds = %for.body6
- %inc13 = add nuw nsw i32 %j.027, 1
- %exitcond29 = icmp eq i32 %inc13, 100
- br i1 %exitcond29, label %for.inc15, label %for.cond4.preheader
-
-for.inc15: ; preds = %for.inc12
- %inc16 = add nuw nsw i32 %i.028, 1
- %exitcond30 = icmp eq i32 %inc16, 100
- br i1 %exitcond30, label %for.end17, label %for.cond1.preheader
-
-for.end17: ; preds = %for.inc15
- ret void
-}
-; CHECK-LABEL: @interchange_08
-; CHECK: entry:
-; CHECK: br label %for.cond1.preheader.preheader
-; CHECK: for.cond1.preheader.preheader: ; preds = %entry
-; CHECK: br label %for.cond1.preheader
-; CHECK: for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc15
-; CHECK: %i.028 = phi i32 [ %inc16, %for.inc15 ], [ 0, %for.cond1.preheader.preheader ]
-; CHECK: br label %for.body6.preheader
-; CHECK: for.cond4.preheader.preheader: ; preds = %for.body6
-; CHECK: br label %for.cond4.preheader
-; CHECK: for.cond4.preheader: ; preds = %for.cond4.preheader.preheader, %for.inc12
-; CHECK: %j.027 = phi i32 [ %inc13, %for.inc12 ], [ 0, %for.cond4.preheader.preheader ]
-; CHECK: br label %for.body6.split1
-; CHECK: for.body6.preheader: ; preds = %for.cond1.preheader
-; CHECK: br label %for.body6
-; CHECK: for.body6: ; preds = %for.body6.preheader, %for.body6.split
-; CHECK: %k.026 = phi i32 [ %inc, %for.body6.split ], [ 0, %for.body6.preheader ]
-; CHECK: br label %for.cond4.preheader.preheader
-; CHECK: for.body6.split1: ; preds = %for.cond4.preheader
-; CHECK: %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* @D, i32 0, i32 %i.028, i32 %k.026, i32 %j.027
-; CHECK: %0 = load i32, i32* %arrayidx8
-; CHECK: %add = add nsw i32 %0, %t
-; CHECK: store i32 %add, i32* %arrayidx8
-; CHECK: br label %for.inc12
-; CHECK: for.body6.split: ; preds = %for.inc12
-; CHECK: %inc = add nuw nsw i32 %k.026, 1
-; CHECK: %exitcond = icmp eq i32 %inc, 100
-; CHECK: br i1 %exitcond, label %for.inc15, label %for.body6
-; CHECK: for.inc12: ; preds = %for.body6.split1
-; CHECK: %inc13 = add nuw nsw i32 %j.027, 1
-; CHECK: %exitcond29 = icmp eq i32 %inc13, 100
-; CHECK: br i1 %exitcond29, label %for.body6.split, label %for.cond4.preheader
-; CHECK: for.inc15: ; preds = %for.body6.split
-; CHECK: %inc16 = add nuw nsw i32 %i.028, 1
-; CHECK: %exitcond30 = icmp eq i32 %inc16, 100
-; CHECK: br i1 %exitcond30, label %for.end17, label %for.cond1.preheader
-; CHECK: for.end17: ; preds = %for.inc15
-; CHECK: ret void
-
-;;-----------------------------------Test case 09-------------------------------
-;; Test that a flow dependency in outer loop doesn't prevent interchange in
-;; loops i and j.
-;;
-;; for (int k = 0; k < 100; ++k) {
-;; T[k] = fn1();
-;; for (int i = 0; i < 1000; ++i)
-;; for(int j = 1; j < 1000; ++j)
-;; Arr[j][i] = Arr[j][i]+k;
-;; fn2(T[k]);
-;; }
-
-@T = internal global [100 x double] zeroinitializer, align 4
-@Arr = internal global [1000 x [1000 x i32]] zeroinitializer, align 4
-
-define void @interchange_09(i32 %k) {
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %for.cond.cleanup4
- ret void
-
-for.body: ; preds = %for.cond.cleanup4, %entry
- %indvars.iv45 = phi i64 [ 0, %entry ], [ %indvars.iv.next46, %for.cond.cleanup4 ]
- %call = call double @fn1()
- %arrayidx = getelementptr inbounds [100 x double], [100 x double]* @T, i64 0, i64 %indvars.iv45
- store double %call, double* %arrayidx, align 8
- br label %for.cond6.preheader
-
-for.cond6.preheader: ; preds = %for.cond.cleanup8, %for.body
- %indvars.iv42 = phi i64 [ 0, %for.body ], [ %indvars.iv.next43, %for.cond.cleanup8 ]
- br label %for.body9
-
-for.cond.cleanup4: ; preds = %for.cond.cleanup8
- %tmp = load double, double* %arrayidx, align 8
- call void @fn2(double %tmp)
- %indvars.iv.next46 = add nuw nsw i64 %indvars.iv45, 1
- %exitcond47 = icmp ne i64 %indvars.iv.next46, 100
- br i1 %exitcond47, label %for.body, label %for.cond.cleanup
-
-for.cond.cleanup8: ; preds = %for.body9
- %indvars.iv.next43 = add nuw nsw i64 %indvars.iv42, 1
- %exitcond44 = icmp ne i64 %indvars.iv.next43, 1000
- br i1 %exitcond44, label %for.cond6.preheader, label %for.cond.cleanup4
-
-for.body9: ; preds = %for.body9, %for.cond6.preheader
- %indvars.iv = phi i64 [ 1, %for.cond6.preheader ], [ %indvars.iv.next, %for.body9 ]
- %arrayidx13 = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv42
- %tmp1 = load i32, i32* %arrayidx13, align 4
- %tmp2 = trunc i64 %indvars.iv45 to i32
- %add = add nsw i32 %tmp1, %tmp2
- store i32 %add, i32* %arrayidx13, align 4
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond = icmp ne i64 %indvars.iv.next, 1000
- br i1 %exitcond, label %for.body9, label %for.cond.cleanup8
-}
-
-declare double @fn1()
-declare void @fn2(double)
-
-
-
-
-
-;; After interchange %indvars.iv (j) should increment as the middle loop.
-;; After interchange %indvars.iv42 (i) should increment with the inner most loop.
-
-; CHECK-LABEL: @interchange_09
-
-; CHECK: for.body:
-; CHECK: %indvars.iv45 = phi i64 [ %indvars.iv.next46, %for.cond.cleanup4 ], [ 0, %for.body.preheader ]
-; CHECK: %call = call double @fn1()
-; CHECK: %arrayidx = getelementptr inbounds [100 x double], [100 x double]* @T, i64 0, i64 %indvars.iv45
-; CHECK: store double %call, double* %arrayidx, align 8
-; CHECK: br label %for.body9.preheader
-
-; CHECK: for.cond6.preheader.preheader:
-; CHECK: br label %for.cond6.preheader
-
-; CHECK: for.cond6.preheader:
-; CHECK: %indvars.iv42 = phi i64 [ %indvars.iv.next43, %for.cond.cleanup8 ], [ 0, %for.cond6.preheader.preheader ]
-; CHECK: br label %for.body9.split1
-
-; CHECK: for.body9.preheader:
-; CHECK: br label %for.body9
-
-; CHECK: for.cond.cleanup4:
-; CHECK: %tmp = load double, double* %arrayidx, align 8
-; CHECK: call void @fn2(double %tmp)
-; CHECK: %indvars.iv.next46 = add nuw nsw i64 %indvars.iv45, 1
-; CHECK: %exitcond47 = icmp ne i64 %indvars.iv.next46, 100
-; CHECK: br i1 %exitcond47, label %for.body, label %for.cond.cleanup
-
-; CHECK: for.cond.cleanup8:
-; CHECK: %indvars.iv.next43 = add nuw nsw i64 %indvars.iv42, 1
-; CHECK: %exitcond44 = icmp ne i64 %indvars.iv.next43, 1000
-; CHECK: br i1 %exitcond44, label %for.cond6.preheader, label %for.body9.split
-
-; CHECK: for.body9:
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body9.split ], [ 1, %for.body9.preheader ]
-; CHECK: br label %for.cond6.preheader.preheader
-
-; CHECK: for.body9.split1:
-; CHECK: %arrayidx13 = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @Arr, i64 0, i64 %indvars.iv, i64 %indvars.iv42
-; CHECK: store i32 %add, i32* %arrayidx13, align 4
-; CHECK: br label %for.cond.cleanup8
-
-; CHECK: for.body9.split:
-; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %exitcond = icmp ne i64 %indvars.iv.next, 1000
-; CHECK: br i1 %exitcond, label %for.body9, label %for.cond.cleanup4
-
-
-;;-----------------------------------Test case 10-------------------------------
-;; Test to make sure we can handle output dependencies.
-;;
-;; for (int i = 0; i < 2; ++i)
-;; for(int j = 0; j < 3; ++j) {
-;; A[j][i] = i;
-;; A[j][i+1] = j;
-;; }
-
-@A10 = local_unnamed_addr global [3 x [3 x i32]] zeroinitializer, align 16
-
-define void @interchange_10() {
-entry:
- br label %for.cond1.preheader
-
-for.cond.loopexit: ; preds = %for.body4
- %exitcond28 = icmp ne i64 %indvars.iv.next27, 2
- br i1 %exitcond28, label %for.cond1.preheader, label %for.cond.cleanup
-
-for.cond1.preheader: ; preds = %for.cond.loopexit, %entry
- %indvars.iv26 = phi i64 [ 0, %entry ], [ %indvars.iv.next27, %for.cond.loopexit ]
- %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
- br label %for.body4
-
-for.cond.cleanup: ; preds = %for.cond.loopexit
- ret void
-
-for.body4: ; preds = %for.body4, %for.cond1.preheader
- %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body4 ]
- %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
- %tmp = trunc i64 %indvars.iv26 to i32
- store i32 %tmp, i32* %arrayidx6, align 4
- %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
- %tmp1 = trunc i64 %indvars.iv to i32
- store i32 %tmp1, i32* %arrayidx10, align 4
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond = icmp ne i64 %indvars.iv.next, 3
- br i1 %exitcond, label %for.body4, label %for.cond.loopexit
-}
-
-; CHECK-LABEL: @interchange_10
-; CHECK: entry:
-; CHECK: br label %for.body4.preheader
-
-; CHECK: for.cond1.preheader.preheader:
-; CHECK: br label %for.cond1.preheader
-
-; CHECK: for.cond.loopexit:
-; CHECK: %exitcond28 = icmp ne i64 %indvars.iv.next27, 2
-; CHECK: br i1 %exitcond28, label %for.cond1.preheader, label %for.body4.split
-
-; CHECK: for.cond1.preheader:
-; CHECK: %indvars.iv26 = phi i64 [ %indvars.iv.next27, %for.cond.loopexit ], [ 0, %for.cond1.preheader.preheader ]
-; CHECK: %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
-; CHECK: br label %for.body4.split1
-
-; CHECK: for.body4.preheader:
-; CHECK: br label %for.body4
-
-; CHECK: for.cond.cleanup:
-; CHECK: ret void
-
-; CHECK: for.body4:
-; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body4.split ], [ 0, %for.body4.preheader ]
-; CHECK: br label %for.cond1.preheader.preheader
-
-; CHECK: for.body4.split1:
-; CHECK: %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
-; CHECK: %tmp = trunc i64 %indvars.iv26 to i32
-; CHECK: store i32 %tmp, i32* %arrayidx6, align 4
-; CHECK: %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
-; CHECK: %tmp1 = trunc i64 %indvars.iv to i32
-; CHECK: store i32 %tmp1, i32* %arrayidx10, align 4
-; CHECK: br label %for.cond.loopexit
-
-; CHECK: for.body4.split:
-; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-; CHECK: %exitcond = icmp ne i64 %indvars.iv.next, 3
-; CHECK: br i1 %exitcond, label %for.body4, label %for.cond.cleanup
diff --git a/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll b/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll
new file mode 100644
index 0000000000000..e14598cfdd609
--- /dev/null
+++ b/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll
@@ -0,0 +1,220 @@
+; Test optimization remarks generated by the LoopInterchange pass.
+;
+; RUN: opt < %s -basicaa -loop-interchange -pass-remarks-output=%t -pass-remarks-missed='loop-interchange' \
+; RUN: -pass-remarks='loop-interchange' -S
+; RUN: cat %t | FileCheck %s
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x [100 x i32]] zeroinitializer
+@C = common global [100 x i32] zeroinitializer
+
+;;---------------------------------------Test case 01---------------------------------
+;; Loop interchange is not profitable.
+;; for(int i=1;i<N;i++)
+;; for(int j=1;j<N;j++)
+;; A[i-1][j-1] = A[i - 1][j-1] + B[i][j];
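+;; Both A[i-1][j-1] and B[i][j] are already stride-1 in the innermost j loop,
+;; so interchange cannot improve locality and the pass emits the
+;; InterchangeNotProfitable remark checked below.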
+
+define void @test01(i32 %N){
+entry:
+ %cmp31 = icmp sgt i32 %N, 1
+ br i1 %cmp31, label %for.cond1.preheader.lr.ph, label %for.end19
+
+for.cond1.preheader.lr.ph:
+ %0 = add i32 %N, -1
+ br label %for.body3.lr.ph
+
+for.body3.lr.ph:
+ %indvars.iv34 = phi i64 [ 1, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next35, %for.inc17 ]
+ %1 = add nsw i64 %indvars.iv34, -1
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
+ %2 = add nsw i64 %indvars.iv, -1
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %1, i64 %2
+ %3 = load i32, i32* %arrayidx6
+ %arrayidx10 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @B, i64 0, i64 %indvars.iv34, i64 %indvars.iv
+ %4 = load i32, i32* %arrayidx10
+ %add = add nsw i32 %4, %3
+ store i32 %add, i32* %arrayidx6
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.inc17, label %for.body3
+
+for.inc17:
+ %indvars.iv.next35 = add nuw nsw i64 %indvars.iv34, 1
+ %lftr.wideiv37 = trunc i64 %indvars.iv34 to i32
+ %exitcond38 = icmp eq i32 %lftr.wideiv37, %0
+ br i1 %exitcond38, label %for.end19, label %for.body3.lr.ph
+
+for.end19:
+ ret void
+}
+
+; CHECK: --- !Missed
+; CHECK-NEXT: Pass: loop-interchange
+; CHECK-NEXT: Name: InterchangeNotProfitable
+; CHECK-NEXT: Function: test01
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - String: 'Interchanging loops is too costly (cost='
+; CHECK-NEXT: - Cost: '2'
+; CHECK-NEXT: - String: ', threshold='
+; CHECK-NEXT: - Threshold: '0'
+; CHECK-NEXT: - String: ') and it does not improve parallelism.'
+; CHECK-NEXT: ...
+
+;;--------------------------------------Test case 02------------------------------------
+;; FIXME: Although this loop is valid, it is currently not interchanged because
+;; of the limitation that we cannot split the inner loop latch when the inner
+;; induction variable has multiple uses (to increment the loop counter and to access A[j+1][i+1]).
+;; for(int i=0;i<N-1;i++)
+;; for(int j=1;j<N-1;j++)
+;; A[j+1][i+1] = A[j+1][i+1] + k;
+
+define void @test02(i32 %k, i32 %N) {
+ entry:
+ %sub = add nsw i32 %N, -1
+ %cmp26 = icmp sgt i32 %N, 1
+ br i1 %cmp26, label %for.cond1.preheader.lr.ph, label %for.end17
+
+ for.cond1.preheader.lr.ph:
+ %cmp324 = icmp sgt i32 %sub, 1
+ %0 = add i32 %N, -2
+ %1 = sext i32 %sub to i64
+ br label %for.cond1.preheader
+
+ for.cond.loopexit:
+ %cmp = icmp slt i64 %indvars.iv.next29, %1
+ br i1 %cmp, label %for.cond1.preheader, label %for.end17
+
+ for.cond1.preheader:
+ %indvars.iv28 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next29, %for.cond.loopexit ]
+ %indvars.iv.next29 = add nuw nsw i64 %indvars.iv28, 1
+ br i1 %cmp324, label %for.body4, label %for.cond.loopexit
+
+ for.body4:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body4 ], [ 1, %for.cond1.preheader ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next, i64 %indvars.iv.next29
+ %2 = load i32, i32* %arrayidx7
+ %add8 = add nsw i32 %2, %k
+ store i32 %add8, i32* %arrayidx7
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.cond.loopexit, label %for.body4
+
+ for.end17:
+ ret void
+}
+
+; CHECK: --- !Missed
+; CHECK-NEXT: Pass: loop-interchange
+; CHECK-NEXT: Name: UnsupportedInsBetweenInduction
+; CHECK-NEXT: Function: test02
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - String: Found unsupported instruction between induction variable increment and branch.
+; CHECK-NEXT: ...
+
+;;-----------------------------------Test case 03-------------------------------
+;; Test to make sure we can handle output dependencies.
+;;
+;; for (int i = 0; i < 2; ++i)
+;; for(int j = 0; j < 3; ++j) {
+;; A[j][i] = i;
+;; A[j][i+1] = j;
+;; }
+
+@A10 = local_unnamed_addr global [3 x [3 x i32]] zeroinitializer, align 16
+
+define void @test03() {
+entry:
+ br label %for.cond1.preheader
+
+for.cond.loopexit: ; preds = %for.body4
+ %exitcond28 = icmp ne i64 %indvars.iv.next27, 2
+ br i1 %exitcond28, label %for.cond1.preheader, label %for.cond.cleanup
+
+for.cond1.preheader: ; preds = %for.cond.loopexit, %entry
+ %indvars.iv26 = phi i64 [ 0, %entry ], [ %indvars.iv.next27, %for.cond.loopexit ]
+ %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
+ br label %for.body4
+
+for.cond.cleanup: ; preds = %for.cond.loopexit
+ ret void
+
+for.body4: ; preds = %for.body4, %for.cond1.preheader
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body4 ]
+ %arrayidx6 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv26
+ %tmp = trunc i64 %indvars.iv26 to i32
+ store i32 %tmp, i32* %arrayidx6, align 4
+ %arrayidx10 = getelementptr inbounds [3 x [3 x i32]], [3 x [3 x i32]]* @A10, i64 0, i64 %indvars.iv, i64 %indvars.iv.next27
+ %tmp1 = trunc i64 %indvars.iv to i32
+ store i32 %tmp1, i32* %arrayidx10, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp ne i64 %indvars.iv.next, 3
+ br i1 %exitcond, label %for.body4, label %for.cond.loopexit
+}
+
+; CHECK: --- !Passed
+; CHECK-NEXT: Pass: loop-interchange
+; CHECK-NEXT: Name: Interchanged
+; CHECK-NEXT: Function: test03
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - String: Loop interchanged with enclosing loop.
+; CHECK-NEXT: ...
+
+;;--------------------------------------Test case 04-------------------------------------
+;; Loops that are not tightly nested are not interchanged.
+;; for(int j=0;j<N;j++) {
+;;  C[j] = j+k;
+;;  for(int i=0;i<N;i++)
+;;   A[j][i] = A[j][i]+C[j];
+;; }
+
+define void @test04(i32 %k, i32 %N){
+entry:
+ %cmp30 = icmp sgt i32 %N, 0
+ br i1 %cmp30, label %for.body.lr.ph, label %for.end17
+
+for.body.lr.ph:
+ %0 = add i32 %N, -1
+ %1 = zext i32 %k to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
+ %2 = add nsw i64 %indvars.iv32, %1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @C, i64 0, i64 %indvars.iv32
+ %3 = trunc i64 %2 to i32
+ store i32 %3, i32* %arrayidx
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 0, %for.body ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
+ %4 = load i32, i32* %arrayidx7
+ %add10 = add nsw i32 %3, %4
+ store i32 %add10, i32* %arrayidx7
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.inc15, label %for.body3
+
+for.inc15:
+ %indvars.iv.next33 = add nuw nsw i64 %indvars.iv32, 1
+ %lftr.wideiv35 = trunc i64 %indvars.iv32 to i32
+ %exitcond36 = icmp eq i32 %lftr.wideiv35, %0
+ br i1 %exitcond36, label %for.end17, label %for.body
+
+for.end17:
+ ret void
+}
+
+; CHECK: --- !Missed
+; CHECK-NEXT: Pass: loop-interchange
+; CHECK-NEXT: Name: NotTightlyNested
+; CHECK-NEXT: Function: test04
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - String: Cannot interchange loops because they are not tightly nested.
+; CHECK-NEXT: ...
diff --git a/test/Transforms/LoopInterchange/not-interchanged-dependencies-1.ll b/test/Transforms/LoopInterchange/not-interchanged-dependencies-1.ll
new file mode 100644
index 0000000000000..cf4f83baea82b
--- /dev/null
+++ b/test/Transforms/LoopInterchange/not-interchanged-dependencies-1.ll
@@ -0,0 +1,64 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We check the complete .ll output for adjustments to the outer and inner loop headers and latches.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x i32] zeroinitializer
+
+;; Loops should not be interchanged in this case, as it is not legal due to a dependence.
+;; for(int j=0;j<99;j++)
+;; for(int i=0;i<99;i++)
+;; A[j][i+1] = A[j+1][i]+k;
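+;; The read of A[j+1][i] in iteration (j,i) aliases the write of A[j][i+1] in
+;; iteration (j+1,i-1): an anti dependence with direction vector (<, >).
+;; Interchange would flip it to (>, <), which is lexicographically negative,
+;; so the transform is illegal.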
+
+define void @interchange_04(i32 %k){
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader:
+ %indvars.iv23 = phi i64 [ 0, %entry ], [ %indvars.iv.next24, %for.inc12 ]
+ %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next24, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx5
+ %add6 = add nsw i32 %0, %k
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv23, i64 %indvars.iv.next
+ store i32 %add6, i32* %arrayidx11
+ %exitcond = icmp eq i64 %indvars.iv.next, 99
+ br i1 %exitcond, label %for.inc12, label %for.body3
+
+for.inc12:
+ %exitcond25 = icmp eq i64 %indvars.iv.next24, 99
+ br i1 %exitcond25, label %for.end14, label %for.cond1.preheader
+
+for.end14:
+ ret void
+}
+
+; CHECK-LABEL: @interchange_04
+; CHECK: entry:
+; CHECK: br label %for.cond1.preheader
+; CHECK: for.cond1.preheader: ; preds = %for.inc12, %entry
+; CHECK: %indvars.iv23 = phi i64 [ 0, %entry ], [ %indvars.iv.next24, %for.inc12 ]
+; CHECK: %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
+; CHECK: br label %for.body3
+; CHECK: for.body3: ; preds = %for.body3, %for.cond1.preheader
+; CHECK: %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+; CHECK: %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv.next24, i64 %indvars.iv
+; CHECK: %0 = load i32, i32* %arrayidx5
+; CHECK: %add6 = add nsw i32 %0, %k
+; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+; CHECK: %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv23, i64 %indvars.iv.next
+; CHECK: store i32 %add6, i32* %arrayidx11
+; CHECK: %exitcond = icmp eq i64 %indvars.iv.next, 99
+; CHECK: br i1 %exitcond, label %for.inc12, label %for.body3
+; CHECK: for.inc12: ; preds = %for.body3
+; CHECK: %exitcond25 = icmp eq i64 %indvars.iv.next24, 99
+; CHECK: br i1 %exitcond25, label %for.end14, label %for.cond1.preheader
+; CHECK: for.end14: ; preds = %for.inc12
+; CHECK: ret void
diff --git a/test/Transforms/LoopInterchange/not-interchanged-loop-nest-3.ll b/test/Transforms/LoopInterchange/not-interchanged-loop-nest-3.ll
new file mode 100644
index 0000000000000..1d4d22883a4f8
--- /dev/null
+++ b/test/Transforms/LoopInterchange/not-interchanged-loop-nest-3.ll
@@ -0,0 +1,87 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We check the complete .ll output for adjustments to the outer and inner loop headers and latches.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@D = common global [100 x [100 x [100 x i32]]] zeroinitializer
+
+;; Test for interchange in loop nest greater than 2.
+;; for(int i=0;i<100;i++)
+;; for(int j=0;j<100;j++)
+;; for(int k=0;k<100;k++)
+;; D[i][k][j] = D[i][k][j]+t;
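+;; The innermost k loop indexes the middle dimension of D; interchanging the
+;; j and k loops makes j innermost, turning D[i][k][j] into a stride-1 access.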
+
+define void @interchange_08(i32 %t){
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc15, %entry
+ %i.028 = phi i32 [ 0, %entry ], [ %inc16, %for.inc15 ]
+ br label %for.cond4.preheader
+
+for.cond4.preheader: ; preds = %for.inc12, %for.cond1.preheader
+ %j.027 = phi i32 [ 0, %for.cond1.preheader ], [ %inc13, %for.inc12 ]
+ br label %for.body6
+
+for.body6: ; preds = %for.body6, %for.cond4.preheader
+ %k.026 = phi i32 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ]
+ %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* @D, i32 0, i32 %i.028, i32 %k.026, i32 %j.027
+ %0 = load i32, i32* %arrayidx8
+ %add = add nsw i32 %0, %t
+ store i32 %add, i32* %arrayidx8
+ %inc = add nuw nsw i32 %k.026, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.inc12, label %for.body6
+
+for.inc12: ; preds = %for.body6
+ %inc13 = add nuw nsw i32 %j.027, 1
+ %exitcond29 = icmp eq i32 %inc13, 100
+ br i1 %exitcond29, label %for.inc15, label %for.cond4.preheader
+
+for.inc15: ; preds = %for.inc12
+ %inc16 = add nuw nsw i32 %i.028, 1
+ %exitcond30 = icmp eq i32 %inc16, 100
+ br i1 %exitcond30, label %for.end17, label %for.cond1.preheader
+
+for.end17: ; preds = %for.inc15
+ ret void
+}
+; CHECK-LABEL: @interchange_08
+; CHECK: entry:
+; CHECK: br label %for.cond1.preheader.preheader
+; CHECK: for.cond1.preheader.preheader: ; preds = %entry
+; CHECK: br label %for.cond1.preheader
+; CHECK: for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc15
+; CHECK: %i.028 = phi i32 [ %inc16, %for.inc15 ], [ 0, %for.cond1.preheader.preheader ]
+; CHECK: br label %for.body6.preheader
+; CHECK: for.cond4.preheader.preheader: ; preds = %for.body6
+; CHECK: br label %for.cond4.preheader
+; CHECK: for.cond4.preheader: ; preds = %for.cond4.preheader.preheader, %for.inc12
+; CHECK: %j.027 = phi i32 [ %inc13, %for.inc12 ], [ 0, %for.cond4.preheader.preheader ]
+; CHECK: br label %for.body6.split1
+; CHECK: for.body6.preheader: ; preds = %for.cond1.preheader
+; CHECK: br label %for.body6
+; CHECK: for.body6: ; preds = %for.body6.preheader, %for.body6.split
+; CHECK: %k.026 = phi i32 [ %inc, %for.body6.split ], [ 0, %for.body6.preheader ]
+; CHECK: br label %for.cond4.preheader.preheader
+; CHECK: for.body6.split1: ; preds = %for.cond4.preheader
+; CHECK: %arrayidx8 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* @D, i32 0, i32 %i.028, i32 %k.026, i32 %j.027
+; CHECK: %0 = load i32, i32* %arrayidx8
+; CHECK: %add = add nsw i32 %0, %t
+; CHECK: store i32 %add, i32* %arrayidx8
+; CHECK: br label %for.inc12
+; CHECK: for.body6.split: ; preds = %for.inc12
+; CHECK: %inc = add nuw nsw i32 %k.026, 1
+; CHECK: %exitcond = icmp eq i32 %inc, 100
+; CHECK: br i1 %exitcond, label %for.inc15, label %for.body6
+; CHECK: for.inc12: ; preds = %for.body6.split1
+; CHECK: %inc13 = add nuw nsw i32 %j.027, 1
+; CHECK: %exitcond29 = icmp eq i32 %inc13, 100
+; CHECK: br i1 %exitcond29, label %for.body6.split, label %for.cond4.preheader
+; CHECK: for.inc15: ; preds = %for.body6.split
+; CHECK: %inc16 = add nuw nsw i32 %i.028, 1
+; CHECK: %exitcond30 = icmp eq i32 %inc16, 100
+; CHECK: br i1 %exitcond30, label %for.end17, label %for.cond1.preheader
+; CHECK: for.end17: ; preds = %for.inc15
+; CHECK: ret void
diff --git a/test/Transforms/LoopInterchange/not-interchanged-tightly-nested.ll b/test/Transforms/LoopInterchange/not-interchanged-tightly-nested.ll
new file mode 100644
index 0000000000000..0cf91b09e65db
--- /dev/null
+++ b/test/Transforms/LoopInterchange/not-interchanged-tightly-nested.ll
@@ -0,0 +1,143 @@
+; RUN: opt < %s -basicaa -loop-interchange -S | FileCheck %s
+;; We check the complete .ll output for adjustments to the outer and inner loop headers and latches.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = common global [100 x [100 x i32]] zeroinitializer
+@B = common global [100 x i32] zeroinitializer
+@C = common global [100 x [100 x i32]] zeroinitializer
+@D = common global [100 x [100 x [100 x i32]]] zeroinitializer
+
+;; Loops that are not tightly nested are not interchanged.
+;; for(int j=0;j<N;j++) {
+;; B[j] = j+k;
+;; for(int i=0;i<N;i++)
+;; A[j][i] = A[j][i]+B[j];
+;; }
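+;; The store to B[j] sits in the outer loop body between the two loop headers,
+;; so the nest is not perfectly nested and interchange must bail out.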
+
+define void @interchange_05(i32 %k, i32 %N){
+entry:
+ %cmp30 = icmp sgt i32 %N, 0
+ br i1 %cmp30, label %for.body.lr.ph, label %for.end17
+
+for.body.lr.ph:
+ %0 = add i32 %N, -1
+ %1 = zext i32 %k to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
+ %2 = add nsw i64 %indvars.iv32, %1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @B, i64 0, i64 %indvars.iv32
+ %3 = trunc i64 %2 to i32
+ store i32 %3, i32* %arrayidx
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 0, %for.body ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
+ %4 = load i32, i32* %arrayidx7
+ %add10 = add nsw i32 %3, %4
+ store i32 %add10, i32* %arrayidx7
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.inc15, label %for.body3
+
+for.inc15:
+ %indvars.iv.next33 = add nuw nsw i64 %indvars.iv32, 1
+ %lftr.wideiv35 = trunc i64 %indvars.iv32 to i32
+ %exitcond36 = icmp eq i32 %lftr.wideiv35, %0
+ br i1 %exitcond36, label %for.end17, label %for.body
+
+for.end17:
+ ret void
+}
+
+; CHECK-LABEL: @interchange_05
+; CHECK: entry:
+; CHECK: %cmp30 = icmp sgt i32 %N, 0
+; CHECK: br i1 %cmp30, label %for.body.lr.ph, label %for.end17
+; CHECK: for.body.lr.ph:
+; CHECK: %0 = add i32 %N, -1
+; CHECK: %1 = zext i32 %k to i64
+; CHECK: br label %for.body
+; CHECK: for.body:
+; CHECK: %indvars.iv32 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next33, %for.inc15 ]
+; CHECK: %2 = add nsw i64 %indvars.iv32, %1
+; CHECK: %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @B, i64 0, i64 %indvars.iv32
+; CHECK: %3 = trunc i64 %2 to i32
+; CHECK: store i32 %3, i32* %arrayidx
+; CHECK: br label %for.body3.preheader
+; CHECK: for.body3.preheader:
+; CHECK: br label %for.body3
+; CHECK: for.body3:
+; CHECK: %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 0, %for.body3.preheader ]
+; CHECK: %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv32, i64 %indvars.iv
+; CHECK: %4 = load i32, i32* %arrayidx7
+; CHECK: %add10 = add nsw i32 %3, %4
+; CHECK: store i32 %add10, i32* %arrayidx7
+; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+; CHECK: %lftr.wideiv = trunc i64 %indvars.iv to i32
+; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, %0
+; CHECK: br i1 %exitcond, label %for.inc15, label %for.body3
+; CHECK: for.inc15:
+; CHECK: %indvars.iv.next33 = add nuw nsw i64 %indvars.iv32, 1
+; CHECK: %lftr.wideiv35 = trunc i64 %indvars.iv32 to i32
+; CHECK: %exitcond36 = icmp eq i32 %lftr.wideiv35, %0
+; CHECK: br i1 %exitcond36, label %for.end17.loopexit, label %for.body
+; CHECK: for.end17.loopexit:
+; CHECK: br label %for.end17
+; CHECK: for.end17:
+; CHECK: ret void
+
+
+declare void @foo(...)
+
+;; Loops not tightly nested are not interchanged
+;; for(int j=0;j<N;j++) {
+;; foo();
+;; for(int i=2;i<N;i++)
+;; A[j][i] = A[j][i]+k;
+;; }
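+;; The call to foo() in the outer-loop body plays the same role here:
+;; it breaks tight nesting, so interchange must bail out.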
+
+define void @interchange_06(i32 %k, i32 %N) {
+entry:
+ %cmp22 = icmp sgt i32 %N, 0
+ br i1 %cmp22, label %for.body.lr.ph, label %for.end12
+
+for.body.lr.ph:
+ %0 = add i32 %N, -1
+ br label %for.body
+
+for.body:
+ %indvars.iv24 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next25, %for.inc10 ]
+ tail call void (...) @foo()
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 2, %for.body ]
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* @A, i64 0, i64 %indvars.iv24, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx5
+ %add = add nsw i32 %1, %k
+ store i32 %add, i32* %arrayidx5
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %0
+ br i1 %exitcond, label %for.inc10, label %for.body3
+
+for.inc10:
+ %indvars.iv.next25 = add nuw nsw i64 %indvars.iv24, 1
+ %lftr.wideiv26 = trunc i64 %indvars.iv24 to i32
+ %exitcond27 = icmp eq i32 %lftr.wideiv26, %0
+ br i1 %exitcond27, label %for.end12, label %for.body
+
+for.end12:
+ ret void
+}
+;; Here we check that the inner phi is not split, which shows the loops were not interchanged.
+; CHECK-LABEL: @interchange_06
+; CHECK: phi i64 [ %indvars.iv.next, %for.body3 ], [ 2, %for.body3.preheader ]
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: %1 = load
diff --git a/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll b/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll
new file mode 100644
index 0000000000000..6014775028ee5
--- /dev/null
+++ b/test/Transforms/LoopUnroll/runtime-loop-multiexit-dom-verify.ll
@@ -0,0 +1,126 @@
+; RUN: opt < %s -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false -unroll-runtime-multi-exit=true -unroll-count=4 -verify-dom-info -S | FileCheck %s
+
+; REQUIRES: asserts
+; The tests below verify the dominator tree after runtime unrolling of loops
+; with multiple exit/exiting blocks.
+
+; We explicitly set the unroll count so that expensiveTripCount computation is allowed.
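+
+; All three tests below share this loop shape (sketch):
+;   preheader -> header
+;   header    -> latch | headerexit
+;   latch     -> header | latchexit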
+
+; mergedexit block has edges from loop exit blocks.
+define i64 @test1() {
+; CHECK-LABEL: test1(
+; CHECK-LABEL: headerexit:
+; CHECK-NEXT: %addphi = phi i64 [ %add.iv, %header ], [ %add.iv.1, %header.1 ], [ %add.iv.2, %header.2 ], [ %add.iv.3, %header.3 ]
+; CHECK-NEXT: br label %mergedexit
+; CHECK-LABEL: latchexit:
+; CHECK-NEXT: %shftphi = phi i64 [ %shft, %latch ], [ %shft.1, %latch.1 ], [ %shft.2, %latch.2 ], [ %shft.3, %latch.3 ]
+; CHECK-NEXT: br label %mergedexit
+; CHECK-LABEL: mergedexit:
+; CHECK-NEXT: %retval = phi i64 [ %addphi, %headerexit ], [ %shftphi, %latchexit ]
+; CHECK-NEXT: ret i64 %retval
+entry:
+ br label %preheader
+
+preheader: ; preds = %entry
+ %trip = zext i32 undef to i64
+ br label %header
+
+header: ; preds = %latch, %preheader
+ %iv = phi i64 [ 2, %preheader ], [ %add.iv, %latch ]
+ %add.iv = add nuw nsw i64 %iv, 2
+ %cmp1 = icmp ult i64 %add.iv, %trip
+ br i1 %cmp1, label %latch, label %headerexit
+
+latch: ; preds = %header
+ %shft = ashr i64 %add.iv, 1
+ %cmp2 = icmp ult i64 %shft, %trip
+ br i1 %cmp2, label %header, label %latchexit
+
+headerexit: ; preds = %header
+ %addphi = phi i64 [ %add.iv, %header ]
+ br label %mergedexit
+
+latchexit: ; preds = %latch
+ %shftphi = phi i64 [ %shft, %latch ]
+ br label %mergedexit
+
+mergedexit: ; preds = %latchexit, %headerexit
+ %retval = phi i64 [ %addphi, %headerexit ], [ %shftphi, %latchexit ]
+ ret i64 %retval
+}
+
+; mergedexit has edges from loop exit blocks and a block outside the loop.
+define void @test2(i1 %cond, i32 %n) {
+; CHECK-LABEL: header.1:
+; CHECK-NEXT: %add.iv.1 = add nuw nsw i64 %add.iv, 2
+; CHECK: br i1 %cmp1.1, label %latch.1, label %headerexit
+; CHECK-LABEL: latch.3:
+; CHECK: %cmp2.3 = icmp ult i64 %shft.3, %trip
+; CHECK-NEXT: br i1 %cmp2.3, label %header, label %latchexit, !llvm.loop
+entry:
+ br i1 %cond, label %preheader, label %mergedexit
+
+preheader: ; preds = %entry
+ %trip = zext i32 %n to i64
+ br label %header
+
+header: ; preds = %latch, %preheader
+ %iv = phi i64 [ 2, %preheader ], [ %add.iv, %latch ]
+ %add.iv = add nuw nsw i64 %iv, 2
+ %cmp1 = icmp ult i64 %add.iv, %trip
+ br i1 %cmp1, label %latch, label %headerexit
+
+latch: ; preds = %header
+ %shft = ashr i64 %add.iv, 1
+ %cmp2 = icmp ult i64 %shft, %trip
+ br i1 %cmp2, label %header, label %latchexit
+
+headerexit: ; preds = %header
+ br label %mergedexit
+
+latchexit: ; preds = %latch
+ br label %mergedexit
+
+mergedexit: ; preds = %latchexit, %headerexit, %entry
+ ret void
+}
+
+
+; exitsucc is reached only from a loop exit block.
+define i64 @test3(i32 %n) {
+; CHECK-LABEL: test3(
+; CHECK-LABEL: headerexit:
+; CHECK-NEXT: br label %exitsucc
+; CHECK-LABEL: latchexit:
+; CHECK-NEXT: %shftphi = phi i64 [ %shft, %latch ], [ %shft.1, %latch.1 ], [ %shft.2, %latch.2 ], [ %shft.3, %latch.3 ]
+; CHECK-NEXT: ret i64 %shftphi
+; CHECK-LABEL: exitsucc:
+; CHECK-NEXT: ret i64 96
+entry:
+ br label %preheader
+
+preheader: ; preds = %entry
+ %trip = zext i32 %n to i64
+ br label %header
+
+header: ; preds = %latch, %preheader
+ %iv = phi i64 [ 2, %preheader ], [ %add.iv, %latch ]
+ %add.iv = add nuw nsw i64 %iv, 2
+ %cmp1 = icmp ult i64 %add.iv, %trip
+ br i1 %cmp1, label %latch, label %headerexit
+
+latch: ; preds = %header
+ %shft = ashr i64 %add.iv, 1
+ %cmp2 = icmp ult i64 %shft, %trip
+ br i1 %cmp2, label %header, label %latchexit
+
+headerexit: ; preds = %header
+ br label %exitsucc
+
+latchexit: ; preds = %latch
+ %shftphi = phi i64 [ %shft, %latch ]
+ ret i64 %shftphi
+
+exitsucc: ; preds = %headerexit
+ ret i64 96
+}
diff --git a/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
index b5e914500fb4a..31c564779fb24 100644
--- a/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
+++ b/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
@@ -86,10 +86,10 @@ for.end: ; preds = %for.end.loopexit, %
; AUTO_VEC-NEXT: entry:
; AUTO_VEC-NEXT: [[TMP0:%.*]] = icmp sgt i64 %n, 1
; AUTO_VEC-NEXT: [[SMAX:%.*]] = select i1 [[TMP0]], i64 %n, i64 1
-; AUTO_VEC: br i1 {{.*}}, label %for.body, label %min.iters.checked
-; AUTO_VEC: min.iters.checked:
+; AUTO_VEC: br i1 {{.*}}, label %for.body, label %vector.ph
+; AUTO_VEC: vector.ph:
; AUTO_VEC-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775792
-; AUTO_VEC: br i1 {{.*}}, label %for.body, label %vector.body
+; AUTO_VEC: br label %vector.body
; AUTO_VEC: middle.block:
; AUTO_VEC: [[TMP11:%.*]] = add nsw i64 [[N_VEC]], -1
; AUTO_VEC-NEXT: [[CAST_CMO:%.*]] = sitofp i64 [[TMP11]] to double
diff --git a/test/Transforms/LoopVectorize/debugloc.ll b/test/Transforms/LoopVectorize/debugloc.ll
index 49d88323523c1..f2d68fb4e62bd 100644
--- a/test/Transforms/LoopVectorize/debugloc.ll
+++ b/test/Transforms/LoopVectorize/debugloc.ll
@@ -5,7 +5,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Make sure we are preserving debug info in the vectorized code.
; CHECK: for.body.lr.ph
-; CHECK: cmp.zero = icmp eq i64 {{.*}}, 0, !dbg !{{[0-9]+}}
+; CHECK: min.iters.check = icmp ult i64 {{.*}}, 2, !dbg !{{[0-9]+}}
; CHECK: vector.body
; CHECK: index {{.*}}, !dbg ![[LOC:[0-9]+]]
; CHECK: getelementptr inbounds i32, i32* %a, {{.*}}, !dbg ![[LOC]]
diff --git a/test/Transforms/LoopVectorize/first-order-recurrence.ll b/test/Transforms/LoopVectorize/first-order-recurrence.ll
index 0ff94c1450acf..508938958d59a 100644
--- a/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -22,7 +22,7 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
; CHECK: middle.block:
; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
; CHECK: scalar.ph:
-; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %pre_load, %vector.memcheck ], [ %pre_load, %min.iters.checked ], [ %pre_load, %for.preheader ]
+; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %pre_load, %vector.memcheck ], [ %pre_load, %for.preheader ]
; CHECK: scalar.body:
; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
;
@@ -79,7 +79,7 @@ for.exit:
; CHECK: middle.block:
; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
; CHECK: scalar.ph:
-; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %.pre, %min.iters.checked ], [ %.pre, %for.preheader ]
+; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %.pre, %for.preheader ]
; CHECK: scalar.body:
; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
;
@@ -144,7 +144,7 @@ scalar.body:
; CHECK: middle.block:
; CHECK: %vector.recur.extract = extractelement <4 x i16> [[L1]], i32 3
; CHECK: scalar.ph:
-; CHECK: %scalar.recur.init = phi i16 [ %vector.recur.extract, %middle.block ], [ %0, %vector.memcheck ], [ %0, %min.iters.checked ], [ %0, %for.preheader ]
+; CHECK: %scalar.recur.init = phi i16 [ %vector.recur.extract, %middle.block ], [ %0, %vector.memcheck ], [ %0, %for.preheader ]
; CHECK: scalar.body:
; CHECK: %scalar.recur = phi i16 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
;
@@ -288,7 +288,7 @@ for.cond.cleanup3:
; UNROLL-NO-IC-LABEL: @PR30183(
; UNROLL-NO-IC: vector.ph:
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> undef, i32 [[PRE_LOAD:%.*]], i32 3
+; UNROLL-NO-IC: [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> undef, i32 [[PRE_LOAD:%.*]], i32 3
; UNROLL-NO-IC-NEXT: br label %vector.body
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
diff --git a/test/Transforms/LoopVectorize/float-induction.ll b/test/Transforms/LoopVectorize/float-induction.ll
index 8eec6e262c1a1..a7cc4530ceb39 100644
--- a/test/Transforms/LoopVectorize/float-induction.ll
+++ b/test/Transforms/LoopVectorize/float-induction.ll
@@ -15,7 +15,7 @@
; VEC4_INTERL1-LABEL: @fp_iv_loop1(
; VEC4_INTERL1: vector.ph:
-; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL1: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> undef, float %fpinc, i32 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> undef, <4 x i32> zeroinitializer
@@ -37,7 +37,7 @@
; VEC4_INTERL2-LABEL: @fp_iv_loop1(
; VEC4_INTERL2: vector.ph:
-; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL2: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT3:%.*]] = insertelement <4 x float> undef, float %fpinc, i32 0
; VEC4_INTERL2-NEXT: [[DOTSPLAT4:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT3]], <4 x float> undef, <4 x i32> zeroinitializer
@@ -63,7 +63,7 @@
; VEC1_INTERL2-LABEL: @fp_iv_loop1(
; VEC1_INTERL2: vector.ph:
-; VEC1_INTERL2-NEXT: br label %vector.body
+; VEC1_INTERL2: br label %vector.body
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
@@ -115,7 +115,7 @@ for.end: ; preds = %for.end.loopexit, %
; VEC4_INTERL1-LABEL: @fp_iv_loop2(
; VEC4_INTERL1: vector.ph:
-; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL1: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
; VEC4_INTERL1-NEXT: [[INDUCTION2:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], <float 0.000000e+00, float 5.000000e-01, float 1.000000e+00, float 1.500000e+00>
; VEC4_INTERL1-NEXT: br label %vector.body
@@ -172,7 +172,7 @@ for.end: ; preds = %for.end.loopexit, %
; VEC4_INTERL1: for.body.lr.ph:
; VEC4_INTERL1: [[TMP0:%.*]] = load float, float* @fp_inc, align 4
; VEC4_INTERL1: vector.ph:
-; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL1: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <4 x float> undef, float [[TMP0]], i32 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT5]], <4 x float> undef, <4 x i32> zeroinitializer
@@ -250,7 +250,7 @@ for.end:
; VEC4_INTERL1-LABEL: @fp_iv_loop4(
; VEC4_INTERL1: vector.ph:
-; VEC4_INTERL1-NEXT: br label %vector.body
+; VEC4_INTERL1: br label %vector.body
; VEC4_INTERL1: vector.body:
; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 1.000000e+00, float 1.500000e+00, float 2.000000e+00, float 2.500000e+00>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
@@ -289,7 +289,7 @@ for.end: ; preds = %for.end.loopexit, %
; VEC2_INTERL1_PRED_STORE-LABEL: @non_primary_iv_float_scalar(
; VEC2_INTERL1_PRED_STORE: vector.body:
-; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ], [ 0, %min.iters.checked ]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ]
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = sitofp i64 [[INDEX]] to float
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <2 x float>*
diff --git a/test/Transforms/LoopVectorize/if-conversion-nest.ll b/test/Transforms/LoopVectorize/if-conversion-nest.ll
index 7f381ae6ad7b5..0d6e4b1e61b44 100644
--- a/test/Transforms/LoopVectorize/if-conversion-nest.ll
+++ b/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -13,24 +13,21 @@ define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[MIN_ITERS_CHECKED:%.*]]
-; CHECK: min.iters.checked:
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[N]], 3
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = zext i32 [[TMP3]] to i64
-; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[TMP2]], [[N_MOD_VF]]
-; CHECK-NEXT: [[CMP_ZERO:%.*]] = icmp eq i64 [[N_VEC]], 0
-; CHECK-NEXT: br i1 [[CMP_ZERO]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
-; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[TMP4]], 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP4]], [[A]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[B]]
; CHECK-NEXT: [[MEMCHECK_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[MEMCHECK_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[N]], 3
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -55,10 +52,10 @@ define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !5
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP3]], 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ], [ 0, [[MIN_ITERS_CHECKED]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END14:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
diff --git a/test/Transforms/LoopVectorize/induction-step.ll b/test/Transforms/LoopVectorize/induction-step.ll
index 33e8ed067160d..b37537efcc513 100644
--- a/test/Transforms/LoopVectorize/induction-step.ll
+++ b/test/Transforms/LoopVectorize/induction-step.ll
@@ -15,7 +15,7 @@
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @int_inc, align 4
; CHECK: vector.ph:
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i32> undef, i32 %init, i32 0
+; CHECK: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i32> undef, i32 %init, i32 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT]], <8 x i32> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <8 x i32> undef, i32 [[TMP0]], i32 0
; CHECK-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT2]], <8 x i32> undef, <8 x i32> zeroinitializer
@@ -86,7 +86,7 @@ for.end: ; preds = %for.end.loopexit, %
; CHECK-LABEL: @induction_with_loop_inv(
; CHECK: vector.ph:
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i32> undef, i32 %x.011, i32 0
+; CHECK: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i32> undef, i32 %x.011, i32 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT]], <8 x i32> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <8 x i32> undef, i32 %j.012, i32 0
; CHECK-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT2]], <8 x i32> undef, <8 x i32> zeroinitializer
diff --git a/test/Transforms/LoopVectorize/induction.ll b/test/Transforms/LoopVectorize/induction.ll
index 7e9e6b1cdc8e3..d77806da59bed 100644
--- a/test/Transforms/LoopVectorize/induction.ll
+++ b/test/Transforms/LoopVectorize/induction.ll
@@ -501,13 +501,13 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; condition and branch directly to the scalar loop.
; CHECK-LABEL: max_i32_backedgetaken
-; CHECK: br i1 true, label %scalar.ph, label %min.iters.checked
+; CHECK: br i1 true, label %scalar.ph, label %vector.ph
; CHECK: middle.block:
; CHECK: %[[v9:.+]] = extractelement <2 x i32> %bin.rdx, i32 0
; CHECK: scalar.ph:
; CHECK: %bc.resume.val = phi i32 [ 0, %middle.block ], [ 0, %[[v0:.+]] ]
-; CHECK: %bc.merge.rdx = phi i32 [ 1, %[[v0:.+]] ], [ 1, %min.iters.checked ], [ %[[v9]], %middle.block ]
+; CHECK: %bc.merge.rdx = phi i32 [ 1, %[[v0:.+]] ], [ %[[v9]], %middle.block ]
define i32 @max_i32_backedgetaken() nounwind readnone ssp uwtable {
diff --git a/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll b/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
index 1e8b982363d80..89c0ac1091676 100644
--- a/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
+++ b/test/Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
;
; CHECK-LABEL: @interleaved_with_cond_store_0(
;
-; CHECK: min.iters.checked
+; CHECK: vector.ph
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 1
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:.+]] = select i1 %[[IsZero]], i64 2, i64 %n.mod.vf
@@ -58,7 +58,7 @@ for.end:
;
; CHECK-LABEL: @interleaved_with_cond_store_1(
;
-; CHECK: min.iters.checked
+; CHECK: vector.ph
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 1
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:.+]] = select i1 %[[IsZero]], i64 2, i64 %n.mod.vf
@@ -117,7 +117,7 @@ for.end:
;
; CHECK-LABEL: @interleaved_with_cond_store_2(
;
-; CHECK: min.iters.checked
+; CHECK: vector.ph
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 1
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:.+]] = select i1 %[[IsZero]], i64 2, i64 %n.mod.vf
diff --git a/test/Transforms/LoopVectorize/interleaved-accesses.ll b/test/Transforms/LoopVectorize/interleaved-accesses.ll
index d84dc42bdf543..530c2f66552af 100644
--- a/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -338,7 +338,7 @@ for.body: ; preds = %for.body, %entry
; }
; CHECK-LABEL: @even_load_dynamic_tc(
-; CHECK: min.iters.checked:
+; CHECK: vector.ph:
; CHECK: %n.mod.vf = and i64 %[[N:[a-zA-Z0-9]+]], 3
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:[a-zA-Z0-9]+]] = select i1 %[[IsZero]], i64 4, i64 %n.mod.vf
@@ -579,7 +579,7 @@ for.body: ; preds = %for.body, %entry
; }
; CHECK-LABEL: @PR27626_0(
-; CHECK: min.iters.checked:
+; CHECK: vector.ph:
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 3
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:[a-zA-Z0-9]+]] = select i1 %[[IsZero]], i64 4, i64 %n.mod.vf
@@ -627,7 +627,7 @@ for.end:
; }
; CHECK-LABEL: @PR27626_1(
-; CHECK: min.iters.checked:
+; CHECK: vector.ph:
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 3
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:[a-zA-Z0-9]+]] = select i1 %[[IsZero]], i64 4, i64 %n.mod.vf
@@ -680,7 +680,7 @@ for.end:
; }
; CHECK-LABEL: @PR27626_2(
-; CHECK: min.iters.checked:
+; CHECK: vector.ph:
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 3
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:[a-zA-Z0-9]+]] = select i1 %[[IsZero]], i64 4, i64 %n.mod.vf
@@ -728,7 +728,7 @@ for.end:
; }
; CHECK-LABEL: @PR27626_3(
-; CHECK: min.iters.checked:
+; CHECK: vector.ph:
; CHECK: %n.mod.vf = and i64 %[[N:.+]], 3
; CHECK: %[[IsZero:[a-zA-Z0-9]+]] = icmp eq i64 %n.mod.vf, 0
; CHECK: %[[R:[a-zA-Z0-9]+]] = select i1 %[[IsZero]], i64 4, i64 %n.mod.vf
diff --git a/test/Transforms/LoopVectorize/iv_outside_user.ll b/test/Transforms/LoopVectorize/iv_outside_user.ll
index 8a44af96e7f4b..265188886996b 100644
--- a/test/Transforms/LoopVectorize/iv_outside_user.ll
+++ b/test/Transforms/LoopVectorize/iv_outside_user.ll
@@ -135,7 +135,7 @@ for.end:
}
; CHECK-LABEL: @PR30742
-; CHECK: min.iters.checked
+; CHECK: vector.ph
; CHECK: %[[N_MOD_VF:.+]] = urem i32 %[[T5:.+]], 2
; CHECK: %[[N_VEC:.+]] = sub i32 %[[T5]], %[[N_MOD_VF]]
; CHECK: middle.block
diff --git a/test/Transforms/LoopVectorize/miniters.ll b/test/Transforms/LoopVectorize/miniters.ll
index 1cb67f9150ac2..f5f4eb5eaa01c 100644
--- a/test/Transforms/LoopVectorize/miniters.ll
+++ b/test/Transforms/LoopVectorize/miniters.ll
@@ -10,10 +10,10 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Generate min.iters.check to skip the vector loop and jump to scalar.ph directly when loop iteration number is less than VF * UF.
; CHECK-LABEL: foo(
; CHECK: %min.iters.check = icmp ult i64 %N, 4
-; CHECK: br i1 %min.iters.check, label %scalar.ph, label %min.iters.checked
+; CHECK: br i1 %min.iters.check, label %scalar.ph, label %vector.ph
; UNROLL-LABEL: foo(
; UNROLL: %min.iters.check = icmp ult i64 %N, 8
-; UNROLL: br i1 %min.iters.check, label %scalar.ph, label %min.iters.checked
+; UNROLL: br i1 %min.iters.check, label %scalar.ph, label %vector.ph
define void @foo(i64 %N) {
entry:
diff --git a/test/Transforms/LoopVectorize/pr30654-phiscev-sext-trunc.ll b/test/Transforms/LoopVectorize/pr30654-phiscev-sext-trunc.ll
new file mode 100644
index 0000000000000..40af8f3adf029
--- /dev/null
+++ b/test/Transforms/LoopVectorize/pr30654-phiscev-sext-trunc.ll
@@ -0,0 +1,240 @@
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that the vectorizer identifies the %p.09 phi
+; as an induction variable, despite the potential overflow
+; due to the truncation from 32-bit to 8-bit.
+; SCEV will detect the pattern "sext(trunc(%p.09)) + %step"
+; and generate the required runtime checks under which
+; we can assume no overflow. We check here that we generate
+; exactly two runtime checks:
+; 1) an overflow check:
+; {0,+,(trunc i32 %step to i8)}<%for.body> Added Flags: <nssw>
+; 2) an equality check verifying that the step of the induction
+; is equal to sext(trunc(step)):
+; Equal predicate: %step == (sext i8 (trunc i32 %step to i8) to i32)
+;
+; See also pr30654.
+;
+; int a[N];
+; void doit1(int n, int step) {
+; int i;
+; char p = 0;
+; for (i = 0; i < n; i++) {
+; a[i] = p;
+; p = p + step;
+; }
+; }
+;
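+; A concrete illustration (not part of the checked output): with %step = 300,
+; trunc i32 300 to i8 gives 44 (300 mod 256), and sext i8 44 to i32 gives 44,
+; which differs from 300, so the equality predicate fails at run time and the
+; scalar loop is taken.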
+
+; CHECK-LABEL: @doit1
+; CHECK: vector.scevcheck
+; CHECK: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK-NOT: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK: %[[TEST:[0-9]+]] = or i1 {{.*}}, %mul.overflow
+; CHECK: %[[NTEST:[0-9]+]] = or i1 false, %[[TEST]]
+; CHECK: %ident.check = icmp ne i32 {{.*}}, %{{.*}}
+; CHECK: %{{.*}} = or i1 %[[NTEST]], %ident.check
+; CHECK-NOT: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK: vector.body:
+; CHECK: <4 x i32>
+
+@a = common local_unnamed_addr global [250 x i32] zeroinitializer, align 16
+
+; Function Attrs: norecurse nounwind uwtable
+define void @doit1(i32 %n, i32 %step) local_unnamed_addr {
+entry:
+ %cmp7 = icmp sgt i32 %n, 0
+ br i1 %cmp7, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %p.09 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %sext = shl i32 %p.09, 24
+ %conv = ashr exact i32 %sext, 24
+ %arrayidx = getelementptr inbounds [250 x i32], [250 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx, align 4
+ %add = add nsw i32 %conv, %step
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; Same as above, but for checking the SCEV "zext(trunc(%p.09)) + %step".
+; Here we expect the following two predicates to be added for runtime checking:
+; 1) {0,+,(trunc i32 %step to i8)}<%for.body> Added Flags: <nusw>
+; 2) Equal predicate: %step == (zext i8 (trunc i32 %step to i8) to i32)
+;
+; int a[N];
+; void doit2(int n, int step) {
+; int i;
+; unsigned char p = 0;
+; for (i = 0; i < n; i++) {
+; a[i] = p;
+; p = p + step;
+; }
+; }
+;
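+; Illustration (not part of the checked output): for %step = -1, trunc i32 -1
+; to i8 gives 0xFF, and zext i8 0xFF to i32 gives 255 != -1, so the equality
+; predicate fails; under the sext scheme of @doit1 the same step would pass.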
+
+; CHECK-LABEL: @doit2
+; CHECK: vector.scevcheck
+; CHECK: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK-NOT: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK: %[[TEST:[0-9]+]] = or i1 {{.*}}, %mul.overflow
+; CHECK: %[[NTEST:[0-9]+]] = or i1 false, %[[TEST]]
+; CHECK: %ident.check = icmp ne i32 {{.*}}, %{{.*}}
+; CHECK: %{{.*}} = or i1 %[[NTEST]], %ident.check
+; CHECK-NOT: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK: vector.body:
+; CHECK: <4 x i32>
+
+; Function Attrs: norecurse nounwind uwtable
+define void @doit2(i32 %n, i32 %step) local_unnamed_addr {
+entry:
+ %cmp7 = icmp sgt i32 %n, 0
+ br i1 %cmp7, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %p.09 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %conv = and i32 %p.09, 255
+ %arrayidx = getelementptr inbounds [250 x i32], [250 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx, align 4
+ %add = add nsw i32 %conv, %step
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; Here we check that the same phi SCEV analysis fails to create
+; the runtime checks because the step is not invariant.
+; As a result, vectorization fails.
+;
+; int a[N];
+; void doit3(int n, int step) {
+; int i;
+; char p = 0;
+; for (i = 0; i < n; i++) {
+; a[i] = p;
+; p = p + step;
+; step += 2;
+; }
+; }
+;
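+; The increment of %p.012 is %step.addr.010, itself an induction
+; ({%step,+,2}<%for.body>), so %p.012 is not an affine recurrence with an
+; invariant step and no trunc/sext predicate can be formed.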
+
+; CHECK-LABEL: @doit3
+; CHECK-NOT: vector.scevcheck
+; CHECK-NOT: vector.body:
+; CHECK-LABEL: for.body:
+
+; Function Attrs: norecurse nounwind uwtable
+define void @doit3(i32 %n, i32 %step) local_unnamed_addr {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %p.012 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %step.addr.010 = phi i32 [ %add3, %for.body ], [ %step, %for.body.preheader ]
+ %sext = shl i32 %p.012, 24
+ %conv = ashr exact i32 %sext, 24
+ %arrayidx = getelementptr inbounds [250 x i32], [250 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx, align 4
+ %add = add nsw i32 %conv, %step.addr.010
+ %add3 = add nsw i32 %step.addr.010, 2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+
+; Lastly, we also check the case where we can tell at compile time that
+; the step of the induction is equal to sext(trunc(step)), in which case
+; we don't have to check this equality at runtime (we only need the
+; runtime overflow check). Therefore only the following overflow predicate
+; will be added for runtime checking:
+; {0,+,%cstep}<%for.body> Added Flags: <nssw>
+;
+; int a[N];
+; void doit4(int n, char cstep) {
+; int i;
+; char p = 0;
+; int istep = cstep;
+; for (i = 0; i < n; i++) {
+; a[i] = p;
+; p = p + istep;
+; }
+; }
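+; Why the equality check is redundant here: %conv = sext i8 %cstep to i32, so
+; trunc i32 %conv to i8 recovers %cstep exactly and sext(trunc(%conv)) == %conv
+; by construction; SCEV can prove this at compile time.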
+
+; CHECK-LABEL: @doit4
+; CHECK: vector.scevcheck
+; CHECK: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK-NOT: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK: %{{.*}} = or i1 {{.*}}, %mul.overflow
+; CHECK-NOT: %ident.check = icmp ne i32 {{.*}}, %{{.*}}
+; CHECK-NOT: %{{.*}} = or i1 %{{.*}}, %ident.check
+; CHECK-NOT: %mul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 {{.*}}, i8 {{.*}})
+; CHECK: vector.body:
+; CHECK: <4 x i32>
+
+; Function Attrs: norecurse nounwind uwtable
+define void @doit4(i32 %n, i8 signext %cstep) local_unnamed_addr {
+entry:
+ %conv = sext i8 %cstep to i32
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %p.011 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %sext = shl i32 %p.011, 24
+ %conv2 = ashr exact i32 %sext, 24
+ %arrayidx = getelementptr inbounds [250 x i32], [250 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %conv2, i32* %arrayidx, align 4
+ %add = add nsw i32 %conv2, %conv
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/runtime-check-readonly.ll b/test/Transforms/LoopVectorize/runtime-check-readonly.ll
index ac1145aab67b0..b37d94c0c328c 100644
--- a/test/Transforms/LoopVectorize/runtime-check-readonly.ll
+++ b/test/Transforms/LoopVectorize/runtime-check-readonly.ll
@@ -4,7 +4,6 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
;CHECK-LABEL: @add_ints(
;CHECK: br
-;CHECK: br
;CHECK: getelementptr
;CHECK-DAG: getelementptr
;CHECK-DAG: icmp ugt
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll
index 958b3c135c976..fb05486127156 100644
--- a/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/test/Transforms/LoopVectorize/runtime-check.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
;CHECK-LABEL: define i32 @foo
;CHECK: for.body.preheader:
-;CHECK: br i1 %cmp.zero, label %scalar.ph, label %vector.memcheck, !dbg [[BODY_LOC:![0-9]+]]
+;CHECK: br i1 %min.iters.check, label %scalar.ph, label %vector.memcheck, !dbg [[BODY_LOC:![0-9]+]]
;CHECK: vector.memcheck:
;CHECK: br i1 %memcheck.conflict, label %scalar.ph, label %vector.ph, !dbg [[BODY_LOC]]
;CHECK: load <4 x float>
diff --git a/test/tools/llvm-cov/showTabsHTML.cpp b/test/tools/llvm-cov/showTabsHTML.cpp
index 953c06a3c60d0..c092841aeb227 100644
--- a/test/tools/llvm-cov/showTabsHTML.cpp
+++ b/test/tools/llvm-cov/showTabsHTML.cpp
@@ -1,5 +1,5 @@
// RUN: llvm-profdata merge -o %t.profdata %S/Inputs/showTabsHTML.proftext
-// RUN: llvm-cov show %S/Inputs/showTabsHTML.covmapping -format html -instr-profile %t.profdata -filename-equivalence %s | FileCheck -check-prefix=CHECK %s
+// RUN: llvm-cov show %S/Inputs/showTabsHTML.covmapping -format html -instr-profile %t.profdata -filename-equivalence %s | FileCheck %s
int main(int argc, char ** argv) {
(void) "This tab starts at column 0"; // CHECK: &nbsp;&nbsp;(void) &quot;This tab starts at column 0&quot;;
@@ -13,4 +13,4 @@ int main(int argc, char ** argv) {
// CHECK-TABSIZE: &nbsp;&nbsp;&nbsp;(void) &quot;This tab starts at column 0&quot;;
// CHECK-TABSIZE: (void) &quot;&nbsp;&nbsp;This tab starts at column 10&quot;;
-// CHECK-TABSIZE: (void) &quot;This &nbsp;&nbsp;&nbsp; tab starts at column 15&quot;; \ No newline at end of file
+// CHECK-TABSIZE: (void) &quot;This &nbsp;&nbsp;&nbsp; tab starts at column 15&quot;;
diff --git a/test/tools/llvm-dwarfdump/X86/verify_debug_info.s b/test/tools/llvm-dwarfdump/X86/verify_debug_info.s
new file mode 100644
index 0000000000000..90733eda278fb
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/verify_debug_info.s
@@ -0,0 +1,193 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
+# RUN: | not llvm-dwarfdump -verify - \
+# RUN: | FileCheck %s
+
+# CHECK: Verifying .debug_info Unit Header Chain...
+# CHECK-NEXT: error: DIE has invalid DW_AT_stmt_list encoding:{{[[:space:]]}}
+# CHECK-NEXT: 0x0000000c: DW_TAG_compile_unit [1] *
+# CHECK-NEXT: DW_AT_producer [DW_FORM_strp] ( .debug_str[0x00000000] = "clang version 5.0.0 (trunk 308185) (llvm/trunk 308186)")
+# CHECK-NEXT: DW_AT_language [DW_FORM_data2] (DW_LANG_C99)
+# CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000037] = "basic.c")
+# CHECK-NEXT: DW_AT_stmt_list [DW_FORM_strx4] ( indexed (00000000) string = )
+# CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strp] ( .debug_str[0x0000003f] = "/Users/sgravani/Development/tests")
+# CHECK-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+# CHECK-NEXT: DW_AT_high_pc [DW_FORM_data4] (0x00000016){{[[:space:]]}}
+# CHECK-NEXT: Units[2] - start offset: 0x00000068
+# CHECK-NEXT: Error: The length for this unit is too large for the .debug_info provided.
+# CHECK-NEXT: Error: The unit type encoding is not valid.
+
+
+ .section __TEXT,__text,regular,pure_instructions
+ .globl _main ## -- Begin function main
+ .p2align 4, 0x90
+_main: ## @main
+Lfunc_begin0:
+ .file 1 "basic.c"
+ .loc 1 1 0 ## basic.c:1:0
+ .cfi_startproc
+## BB#0: ## %entry
+ pushq %rbp
+Lcfi0:
+ .cfi_def_cfa_offset 16
+Lcfi1:
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+Lcfi2:
+ .cfi_def_cfa_register %rbp
+ xorl %eax, %eax
+ movl $0, -4(%rbp)
+Ltmp0:
+ .loc 1 2 7 prologue_end ## basic.c:2:7
+ movl $1, -8(%rbp)
+ .loc 1 3 3 ## basic.c:3:3
+ popq %rbp
+ retq
+Ltmp1:
+Lfunc_end0:
+ .cfi_endproc
+ ## -- End function
+ .section __DWARF,__debug_str,regular,debug
+Linfo_string:
+ .asciz "clang version 5.0.0 (trunk 308185) (llvm/trunk 308186)" ## string offset=0
+ .asciz "basic.c" ## string offset=55
+ .asciz "/Users/sgravani/Development/tests" ## string offset=63
+ .asciz "main" ## string offset=97
+ .asciz "int" ## string offset=102
+ .asciz "a" ## string offset=106
+ .section __DWARF,__debug_abbrev,regular,debug
+Lsection_abbrev:
+ .byte 1 ## Abbreviation Code
+ .byte 17 ## DW_TAG_compile_unit
+ .byte 1 ## DW_CHILDREN_yes
+ .byte 37 ## DW_AT_producer
+ .byte 14 ## DW_FORM_strp
+ .byte 19 ## DW_AT_language
+ .byte 5 ## DW_FORM_data2
+ .byte 3 ## DW_AT_name
+ .byte 14 ## DW_FORM_strp
+ .byte 16 ## DW_AT_stmt_list
+ .byte 40 ## DW_FORM_sec_offset -- error: DIE has invalid DW_AT_stmt_list encoding:
+ .byte 27 ## DW_AT_comp_dir
+ .byte 14 ## DW_FORM_strp
+ .byte 17 ## DW_AT_low_pc
+ .byte 1 ## DW_FORM_addr
+ .byte 18 ## DW_AT_high_pc
+ .byte 6 ## DW_FORM_data4
+ .byte 0 ## EOM(1)
+ .byte 0 ## EOM(2)
+ .byte 2 ## Abbreviation Code
+ .byte 46 ## DW_TAG_subprogram
+ .byte 1 ## DW_CHILDREN_yes
+ .byte 17 ## DW_AT_low_pc
+ .byte 1 ## DW_FORM_addr
+ .byte 18 ## DW_AT_high_pc
+ .byte 6 ## DW_FORM_data4
+ .byte 64 ## DW_AT_frame_base
+ .byte 24 ## DW_FORM_exprloc
+ .byte 3 ## DW_AT_name
+ .byte 14 ## DW_FORM_strp
+ .byte 58 ## DW_AT_decl_file
+ .byte 11 ## DW_FORM_data1
+ .byte 59 ## DW_AT_decl_line
+ .byte 11 ## DW_FORM_data1
+ .byte 39 ## DW_AT_prototyped
+ .byte 25 ## DW_FORM_flag_present
+ .byte 73 ## DW_AT_type
+ .byte 19 ## DW_FORM_ref4
+ .byte 63 ## DW_AT_external
+ .byte 25 ## DW_FORM_flag_present
+ .byte 0 ## EOM(1)
+ .byte 0 ## EOM(2)
+ .byte 3 ## Abbreviation Code
+ .byte 52 ## DW_TAG_variable
+ .byte 0 ## DW_CHILDREN_no
+ .byte 2 ## DW_AT_location
+ .byte 24 ## DW_FORM_exprloc
+ .byte 3 ## DW_AT_name
+ .byte 14 ## DW_FORM_strp
+ .byte 58 ## DW_AT_decl_file
+ .byte 11 ## DW_FORM_data1
+ .byte 59 ## DW_AT_decl_line
+ .byte 11 ## DW_FORM_data1
+ .byte 73 ## DW_AT_type
+ .byte 19 ## DW_FORM_ref4
+ .byte 0 ## EOM(1)
+ .byte 0 ## EOM(2)
+ .byte 4 ## Abbreviation Code
+ .byte 36 ## DW_TAG_base_type
+ .byte 0 ## DW_CHILDREN_no
+ .byte 3 ## DW_AT_name
+ .byte 14 ## DW_FORM_strp
+ .byte 62 ## DW_AT_encoding
+ .byte 11 ## DW_FORM_data1
+ .byte 11 ## DW_AT_byte_size
+ .byte 11 ## DW_FORM_data1
+ .byte 0 ## EOM(1)
+ .byte 0 ## EOM(2)
+ .byte 0 ## EOM(3)
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+Lcu_begin0:
+ .long 87 ## Length of Unit
+ .short 5 ## DWARF version number
+ .byte 1 ## DWARF Unit Type
+ .byte 8 ## Address Size (in bytes)
+Lset0 = Lsection_abbrev-Lsection_abbrev ## Offset Into Abbrev. Section
+ .long Lset0
+ .byte 1 ## Abbrev [1] 0xc:0x4f DW_TAG_compile_unit
+ .long 0 ## DW_AT_producer
+ .short 12 ## DW_AT_language
+ .long 55 ## DW_AT_name
+Lset1 = Lline_table_start0-Lsection_line ## DW_AT_stmt_list
+ .long Lset1
+ .long 63 ## DW_AT_comp_dir
+ .quad Lfunc_begin0 ## DW_AT_low_pc
+Lset2 = Lfunc_end0-Lfunc_begin0 ## DW_AT_high_pc
+ .long Lset2
+ .byte 2 ## Abbrev [2] 0x2b:0x28 DW_TAG_subprogram
+ .quad Lfunc_begin0 ## DW_AT_low_pc
+Lset3 = Lfunc_end0-Lfunc_begin0 ## DW_AT_high_pc
+ .long Lset3
+ .byte 1 ## DW_AT_frame_base
+ .byte 86
+ .long 97 ## DW_AT_name
+ .byte 1 ## DW_AT_decl_file
+ .byte 1 ## DW_AT_decl_line
+ ## DW_AT_prototyped
+ .long 83 ## DW_AT_type
+ ## DW_AT_external
+ .byte 3 ## Abbrev [3] 0x44:0xe DW_TAG_variable
+ .byte 2 ## DW_AT_location
+ .byte 145
+ .byte 120
+ .long 106 ## DW_AT_name
+ .byte 1 ## DW_AT_decl_file
+ .byte 2 ## DW_AT_decl_line
+ .long 83 ## DW_AT_type
+ .byte 0 ## End Of Children Mark
+ .byte 4 ## Abbrev [4] 0x53:0x7 DW_TAG_base_type
+ .long 102 ## DW_AT_name
+ .byte 5 ## DW_AT_encoding
+ .byte 4 ## DW_AT_byte_size
+ .byte 0 ## End Of Children Mark
+Lcu_begin1:
+ .long 9 ## Length of Unit
+ .short 5 ## DWARF version number
+ .byte 1 ## DWARF Unit Type
+ .byte 4 ## Address Size (in bytes)
+ .long 0 ## Abbrev offset
+ .byte 0
+Ltu_begin0:
+ .long 26 ## Length of Unit -- Error: The length for this unit is too large for the .debug_info provided.
+ .short 5 ## DWARF version number
+ .byte 0 ## DWARF Unit Type
+ .byte 4 ## Address Size (in bytes)
+ .long 0
+ .quad 0
+ .long 0
+ .byte 0
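+ ## Only 21 bytes (2+1+1+4+8+4+1) follow the length field above, so the
+ ## declared unit length of 26 overruns the end of .debug_info.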
+
+.subsections_via_symbols
+ .section __DWARF,__debug_line,regular,debug
+Lsection_line:
+Lline_table_start0:
diff --git a/test/tools/llvm-dwarfdump/X86/verify_unit_header_chain.s b/test/tools/llvm-dwarfdump/X86/verify_unit_header_chain.s
new file mode 100644
index 0000000000000..a3a54077bbf9c
--- /dev/null
+++ b/test/tools/llvm-dwarfdump/X86/verify_unit_header_chain.s
@@ -0,0 +1,81 @@
+# RUN: llvm-mc %s -filetype obj -triple x86_64-apple-darwin -o - \
+# RUN: | not llvm-dwarfdump -verify - \
+# RUN: | FileCheck %s
+
+# CHECK: Verifying .debug_info Unit Header Chain...
+# CHECK-NEXT: Units[1] - start offset: 0x0000000d
+# CHECK-NEXT: Error: The unit type encoding is not valid.
+# CHECK-NEXT: Error: The address size is unsupported.
+# CHECK-NEXT: Units[2] - start offset: 0x00000026
+# CHECK-NEXT: Error: The 16 bit unit header version is not valid.
+# CHECK-NEXT: Error: The offset into the .debug_abbrev section is not valid.
+# CHECK-NEXT: Units[4] - start offset: 0x00000041
+# CHECK-NEXT: Error: The length for this unit is too large for the .debug_info provided.
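+
+# Units[0] (Lcu_begin0, DWARF v4) and Units[3] (Lcu_begin2, DWARF v5) are
+# well-formed, so only Units[1], Units[2] and Units[4] are reported.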
+
+ .section __TEXT,__text,regular,pure_instructions
+ .file 1 "basic.c"
+ .comm _i,4,2 ## @i
+ .comm _j,4,2 ## @j
+ .section __DWARF,__debug_str,regular,debug
+Linfo_string:
+ .asciz "clang version 5.0.0 (trunk 307232) (llvm/trunk 307042)" ## string offset=0
+ .asciz "basic.c" ## string offset=55
+ .asciz "/Users/sgravani/Development/tests" ## string offset=63
+ .asciz "i" ## string offset=97
+ .asciz "int" ## string offset=99
+ .asciz "j" ## string offset=103
+ .section __DWARF,__debug_abbrev,regular,debug
+Lsection_abbrev:
+ .byte 1 ## Abbreviation Code
+ .byte 17 ## DW_TAG_compile_unit
+ .byte 0 ## EOM(1)
+ .byte 0 ## EOM(2)
+ .byte 0 ## EOM(3)
+ .section __DWARF,__debug_info,regular,debug
+Lsection_info:
+Lcu_begin0:
+ .long 9 ## Length of Unit
+ .short 4 ## DWARF version number
+Lset0 = Lsection_abbrev-Lsection_abbrev ## Offset Into Abbrev. Section
+ .long Lset0
+ .byte 4 ## Address Size (in bytes)
+ .byte 1 ## Abbrev [1] 0xc:0x45 DW_TAG_compile_unit
+ .byte 0 ## End Of Children Mark
+Ltu_begin0:
+ .long 21 ## Length of Unit
+ .short 5 ## DWARF version number
+ .byte 0 ## DWARF Unit Type -- Error: The unit type encoding is not valid.
+ .byte 3 ## Address Size (in bytes) -- Error: The address size is unsupported.
+ .long 0
+ .quad 0
+ .long 0
+ .byte 0
+Lcu_begin1:
+ .long 10 ## Length of Unit
+ .short 6 ## DWARF version number -- Error: The 16 bit unit header version is not valid.
+ .byte 1 ## DWARF Unit Type
+ .byte 4 ## Address Size (in bytes) -- Error: The offset into the .debug_abbrev section is not valid.
+ .long Lline_table_start0
+ .byte 1 ## Abbrev [1] 0xc:0x45 DW_TAG_compile_unit
+ .byte 0 ## End Of Children Mark
+Lcu_begin2:
+ .long 9 ## Length of Unit
+ .short 5 ## DWARF version number
+ .byte 1 ## DWARF Unit Type
+ .byte 4 ## Address Size (in bytes)
+ .long 0 ## Abbrev offset
+ .byte 0
+Ltu_begin1:
+ .long 26 ## Length of Unit -- Error: The length for this unit is too large for the .debug_info provided.
+ .short 5 ## DWARF version number
+ .byte 2 ## DWARF Unit Type
+ .byte 4 ## Address Size (in bytes)
+ .long 0
+ .quad 0
+ .long 0
+ .byte 0
+
+.subsections_via_symbols
+ .section __DWARF,__debug_line,regular,debug
+Lsection_line:
+Lline_table_start0:
diff --git a/test/tools/llvm-mt/help.test b/test/tools/llvm-mt/help.test
new file mode 100644
index 0000000000000..29e3667ec2cad
--- /dev/null
+++ b/test/tools/llvm-mt/help.test
@@ -0,0 +1,7 @@
+RUN: llvm-mt /h | FileCheck %s -check-prefix=HELP
+
+RUN: llvm-mt /inputresource:foo.res /manifest foo.manifest | FileCheck %s -check-prefix=NOT_SUPPORTED
+
+HELP: OVERVIEW: Manifest Tool
+
+NOT_SUPPORTED: llvm-mt: ignoring unsupported 'inputresource:' option
diff --git a/test/tools/llvm-objdump/AArch64/Inputs/reloc-addend.obj.macho-aarch64 b/test/tools/llvm-objdump/AArch64/Inputs/reloc-addend.obj.macho-aarch64
new file mode 100644
index 0000000000000..58ed3c7a48fc5
--- /dev/null
+++ b/test/tools/llvm-objdump/AArch64/Inputs/reloc-addend.obj.macho-aarch64
Binary files differ
diff --git a/test/tools/llvm-objdump/AArch64/macho-reloc-addend.test b/test/tools/llvm-objdump/AArch64/macho-reloc-addend.test
new file mode 100644
index 0000000000000..4cc4a6eca2a11
--- /dev/null
+++ b/test/tools/llvm-objdump/AArch64/macho-reloc-addend.test
@@ -0,0 +1,6 @@
+RUN: llvm-objdump -r %p/Inputs/reloc-addend.obj.macho-aarch64 | FileCheck %s
+
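+Each ARM64_RELOC_ADDEND entry supplies the addend (here 0x999) for the
+relocation that follows at the same offset.
+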
+CHECK-DAG: 0000000000000004 ARM64_RELOC_ADDEND 0x999
+CHECK-DAG: 0000000000000004 ARM64_RELOC_PAGEOFF12 _stringbuf
+CHECK-DAG: 0000000000000000 ARM64_RELOC_ADDEND 0x999
+CHECK-DAG: 0000000000000000 ARM64_RELOC_PAGE21 _stringbuf
diff --git a/test/tools/llvm-readobj/Inputs/dynamic-table-so.x86 b/test/tools/llvm-readobj/Inputs/dynamic-table-so.x86
index fb9d37881c983..01bd1c2fc1edf 100644
--- a/test/tools/llvm-readobj/Inputs/dynamic-table-so.x86
+++ b/test/tools/llvm-readobj/Inputs/dynamic-table-so.x86
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/dynamic-table.c b/test/tools/llvm-readobj/Inputs/dynamic-table.c
index b5251f87ee533..0af66ca0c53f4 100644
--- a/test/tools/llvm-readobj/Inputs/dynamic-table.c
+++ b/test/tools/llvm-readobj/Inputs/dynamic-table.c
@@ -1,10 +1,10 @@
// clang -target x86_64-linux-gnu -shared -fPIC -lc dynamic-table.c \
-// -o dynamic-table-so.x86 -Wl,-f,aux_val
+// -o dynamic-table-so.x86 -Wl,-f,aux.so -Wl,-F,filter.so
// clang -target mipsel-linux-gnu -shared -fPIC -lc dynamic-table.c \
// -o dynamic-table-so.mips
// clang -target mipsel-linux-gnu -lc dynamic-table.c \
// -o dynamic-table-exe.mips
-// clang -target aarch64-linux-gnu -fPIC -shared dynamic-table.c \
+// clang -target aarch64-linux-gnu -fPIC -shared dynamic-table.c\
// -o dynamic-table-so.aarch64
int puts(const char *);
diff --git a/test/tools/llvm-readobj/dynamic.test b/test/tools/llvm-readobj/dynamic.test
index 5079d35569579..71b6b06cbc089 100644
--- a/test/tools/llvm-readobj/dynamic.test
+++ b/test/tools/llvm-readobj/dynamic.test
@@ -8,7 +8,7 @@ ELF-MIPS: AddressSize: 32bit
ELF-MIPS: LoadName:
ELF-MIPS: DynamicSection [ (23 entries)
ELF-MIPS: Tag Type Name/Value
-ELF-MIPS: 0x00000001 NEEDED SharedLibrary (libc.so.6)
+ELF-MIPS: 0x00000001 NEEDED Shared library: [libc.so.6]
ELF-MIPS: 0x0000000C INIT 0x528
ELF-MIPS: 0x0000000D FINI 0x860
ELF-MIPS: 0x00000004 HASH 0x210
@@ -43,7 +43,7 @@ ELF-MIPS-EXE: AddressSize: 32bit
ELF-MIPS-EXE: LoadName:
ELF-MIPS-EXE: DynamicSection [ (26 entries)
ELF-MIPS-EXE: Tag Type Name/Value
-ELF-MIPS-EXE: 0x00000001 NEEDED SharedLibrary (libc.so.6)
+ELF-MIPS-EXE: 0x00000001 NEEDED Shared library: [libc.so.6]
ELF-MIPS-EXE: 0x0000000C INIT 0x400418
ELF-MIPS-EXE: 0x0000000D FINI 0x4007B0
ELF-MIPS-EXE: 0x00000004 HASH 0x4002B8
@@ -80,9 +80,9 @@ ELF-X86-EXE: AddressSize: 32bit
ELF-X86-EXE: LoadName:
ELF-X86-EXE: DynamicSection [ (30 entries)
ELF-X86-EXE: Tag Type Name/Value
-ELF-X86-EXE: 0x00000001 NEEDED SharedLibrary (libstdc++.so.6)
-ELF-X86-EXE: 0x00000001 NEEDED SharedLibrary (libgcc_s.so.1)
-ELF-X86-EXE: 0x00000001 NEEDED SharedLibrary (libc.so.6)
+ELF-X86-EXE: 0x00000001 NEEDED Shared library: [libstdc++.so.6]
+ELF-X86-EXE: 0x00000001 NEEDED Shared library: [libgcc_s.so.1]
+ELF-X86-EXE: 0x00000001 NEEDED Shared library: [libc.so.6]
ELF-X86-EXE: 0x0000000C INIT 0x62C
ELF-X86-EXE: 0x0000000D FINI 0x920
ELF-X86-EXE: 0x00000019 INIT_ARRAY 0x19FC
@@ -119,32 +119,33 @@ ELF-X86-SO: Format: ELF64-x86-64
ELF-X86-SO: Arch: x86_64
ELF-X86-SO: AddressSize: 64bit
ELF-X86-SO: LoadName:
-ELF-X86-SO: DynamicSection [ (26 entries)
+ELF-X86-SO: DynamicSection [ ({{[0-9]+}} entries)
ELF-X86-SO: Tag Type Name/Value
-ELF-X86-SO: 0x0000000000000001 NEEDED SharedLibrary (libc.so.6)
-ELF-X86-SO: 0x0000000000000001 NEEDED SharedLibrary (ld-linux-x86-64.so.2)
-ELF-X86-SO: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [aux_val]
-ELF-X86-SO: 0x000000000000000C INIT 0x610
-ELF-X86-SO: 0x000000000000000D FINI 0x7AC
-ELF-X86-SO: 0x0000000000000019 INIT_ARRAY 0x200DD0
+ELF-X86-SO: 0x0000000000000001 NEEDED Shared library: [libc.so.6]
+ELF-X86-SO: 0x0000000000000001 NEEDED Shared library: [ld-linux-x86-64.so.2]
+ELF-X86-SO: 0x000000007FFFFFFF FILTER Filter library: [filter.so]
+ELF-X86-SO: 0x000000007FFFFFFD AUXILIARY Auxiliary library: [aux.so]
+ELF-X86-SO: 0x000000000000000C INIT 0x{{[0-9A-F]+}}
+ELF-X86-SO: 0x000000000000000D FINI 0x{{[0-9A-F]+}}
+ELF-X86-SO: 0x0000000000000019 INIT_ARRAY 0x{{[0-9A-F]+}}
ELF-X86-SO: 0x000000000000001B INIT_ARRAYSZ 8 (bytes)
-ELF-X86-SO: 0x000000000000001A FINI_ARRAY 0x200DD8
+ELF-X86-SO: 0x000000000000001A FINI_ARRAY 0x{{[0-9A-F]+}}
ELF-X86-SO: 0x000000000000001C FINI_ARRAYSZ 8 (bytes)
ELF-X86-SO: 0x000000006FFFFEF5 GNU_HASH 0x1C8
ELF-X86-SO: 0x0000000000000005 STRTAB 0x3A0
ELF-X86-SO: 0x0000000000000006 SYMTAB 0x208
-ELF-X86-SO: 0x000000000000000A STRSZ 231 (bytes)
+ELF-X86-SO: 0x000000000000000A STRSZ {{[0-9]+}} (bytes)
ELF-X86-SO: 0x000000000000000B SYMENT 24 (bytes)
ELF-X86-SO: 0x0000000000000003 PLTGOT 0x201000
ELF-X86-SO: 0x0000000000000002 PLTRELSZ 48 (bytes)
ELF-X86-SO: 0x0000000000000014 PLTREL RELA
-ELF-X86-SO: 0x0000000000000017 JMPREL 0x5E0
-ELF-X86-SO: 0x0000000000000007 RELA 0x4F0
+ELF-X86-SO: 0x0000000000000017 JMPREL 0x{{[0-9A-F]+}}
+ELF-X86-SO: 0x0000000000000007 RELA 0x{{[0-9A-F]+}}
ELF-X86-SO: 0x0000000000000008 RELASZ 240 (bytes)
ELF-X86-SO: 0x0000000000000009 RELAENT 24 (bytes)
-ELF-X86-SO: 0x000000006FFFFFFE VERNEED 0x4B0
+ELF-X86-SO: 0x000000006FFFFFFE VERNEED 0x{{[0-9A-F]+}}
ELF-X86-SO: 0x000000006FFFFFFF VERNEEDNUM 2
-ELF-X86-SO: 0x000000006FFFFFF0 VERSYM 0x488
+ELF-X86-SO: 0x000000006FFFFFF0 VERSYM 0x{{[0-9A-F]+}}
ELF-X86-SO: 0x000000006FFFFFF9 RELACOUNT 3
ELF-X86-SO: 0x0000000000000000 NULL 0x0
@@ -157,7 +158,7 @@ ELF-AARCH64-SO: AddressSize: 64bit
ELF-AARCH64-SO: LoadName:
ELF-AARCH64-SO: DynamicSection [ (26 entries)
ELF-AARCH64-SO: Tag Type Name/Value
-ELF-AARCH64-SO: 0x0000000000000001 NEEDED SharedLibrary (libc.so.6)
+ELF-AARCH64-SO: 0x0000000000000001 NEEDED Shared library: [libc.so.6]
ELF-AARCH64-SO: 0x000000000000000C INIT 0x660
ELF-AARCH64-SO: 0x000000000000000D FINI 0x83C
ELF-AARCH64-SO: 0x0000000000000019 INIT_ARRAY 0x10DB8
diff --git a/test/tools/llvm-readobj/gnu-sections.test b/test/tools/llvm-readobj/gnu-sections.test
index fb90ce44d10f9..f5b504fa66fa7 100644
--- a/test/tools/llvm-readobj/gnu-sections.test
+++ b/test/tools/llvm-readobj/gnu-sections.test
@@ -1,6 +1,14 @@
RUN: llvm-readobj -s %p/Inputs/relocs.obj.elf-i386 --elf-output-style=GNU \
RUN: | FileCheck %s -check-prefix ELF32
-RUN: llvm-readobj -s %p/Inputs/relocs.obj.elf-x86_64 --elf-output-style=GNU \
+RUN: llvm-readobj -S %p/Inputs/relocs.obj.elf-x86_64 --elf-output-style=GNU \
+RUN: | FileCheck %s -check-prefix ELF64
+RUN: llvm-readobj --wide --sections \
+RUN: %p/Inputs/relocs.obj.elf-x86_64 --elf-output-style=GNU \
+RUN: | FileCheck %s -check-prefix ELF64
+RUN: llvm-readobj -W --sections \
+RUN: %p/Inputs/relocs.obj.elf-x86_64 --elf-output-style=GNU \
+RUN: | FileCheck %s -check-prefix ELF64
+RUN: llvm-readelf -W -S %p/Inputs/relocs.obj.elf-x86_64 \
RUN: | FileCheck %s -check-prefix ELF64
ELF32: Section Headers: