Diffstat (limited to 'test/MC/X86')
-rw-r--r--  test/MC/X86/AlignedBundling/align-mode-argument-error.s | 8
-rw-r--r--  test/MC/X86/AlignedBundling/asm-printing-bundle-directives.s | 22
-rw-r--r--  test/MC/X86/AlignedBundling/autogen-inst-offset-align-to-end.s | 2899
-rw-r--r--  test/MC/X86/AlignedBundling/autogen-inst-offset-padding.s | 2674
-rw-r--r--  test/MC/X86/AlignedBundling/bundle-group-too-large-error.s | 17
-rw-r--r--  test/MC/X86/AlignedBundling/bundle-lock-option-error.s | 11
-rw-r--r--  test/MC/X86/AlignedBundling/different-sections.s | 25
-rw-r--r--  test/MC/X86/AlignedBundling/lit.local.cfg | 6
-rw-r--r--  test/MC/X86/AlignedBundling/lock-without-bundle-mode-error.s | 10
-rw-r--r--  test/MC/X86/AlignedBundling/long-nop-pad.s | 27
-rw-r--r--  test/MC/X86/AlignedBundling/pad-align-to-bundle-end.s | 33
-rw-r--r--  test/MC/X86/AlignedBundling/pad-bundle-groups.s | 46
-rw-r--r--  test/MC/X86/AlignedBundling/relax-at-bundle-end.s | 16
-rw-r--r--  test/MC/X86/AlignedBundling/relax-in-bundle-group.s | 42
-rw-r--r--  test/MC/X86/AlignedBundling/single-inst-bundling.s | 47
-rw-r--r--  test/MC/X86/AlignedBundling/switch-section-locked-error.s | 16
-rw-r--r--  test/MC/X86/AlignedBundling/unlock-without-lock-error.s | 11
-rw-r--r--  test/MC/X86/fde-reloc.s | 11
-rw-r--r--  test/MC/X86/gnux32-dwarf-gen.s | 24
-rw-r--r--  test/MC/X86/intel-syntax-encoding.s | 21
-rw-r--r--  test/MC/X86/intel-syntax-hex.s | 26
-rw-r--r--  test/MC/X86/intel-syntax.s | 200
-rw-r--r--  test/MC/X86/lit.local.cfg | 9
-rw-r--r--  test/MC/X86/shuffle-comments.s | 271
-rw-r--r--  test/MC/X86/x86-32-avx.s | 60
-rw-r--r--  test/MC/X86/x86-32-coverage.s | 120
-rw-r--r--  test/MC/X86/x86-32-ms-inline-asm.s | 33
-rw-r--r--  test/MC/X86/x86-64.s | 10
-rw-r--r--  test/MC/X86/x86_64-avx-encoding.s | 60
-rw-r--r--  test/MC/X86/x86_64-fma4-encoding.s | 65
-rw-r--r--  test/MC/X86/x86_64-rand-encoding.s | 49
-rw-r--r--  test/MC/X86/x86_64-rtm-encoding.s | 4
-rw-r--r--  test/MC/X86/x86_errors.s | 2
-rw-r--r--  test/MC/X86/x86_long_nop.s | 15
34 files changed, 6771 insertions, 119 deletions
diff --git a/test/MC/X86/AlignedBundling/align-mode-argument-error.s b/test/MC/X86/AlignedBundling/align-mode-argument-error.s
new file mode 100644
index 0000000000000..b4ce0a9d103ab
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/align-mode-argument-error.s
@@ -0,0 +1,8 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# Missing .bundle_align_mode argument
+# CHECK: error: unknown token
+
+ .bundle_align_mode
+ imull $17, %ebx, %ebp
+
diff --git a/test/MC/X86/AlignedBundling/asm-printing-bundle-directives.s b/test/MC/X86/AlignedBundling/asm-printing-bundle-directives.s
new file mode 100644
index 0000000000000..387e0fe59bf29
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/asm-printing-bundle-directives.s
@@ -0,0 +1,22 @@
+# RUN: llvm-mc -filetype=asm -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# Just a simple test for the assembly emitter - making sure it emits back the
+# bundling directives.
+
+ .text
+foo:
+ .bundle_align_mode 4
+# CHECK: .bundle_align_mode 4
+ pushq %rbp
+ .bundle_lock
+# CHECK: .bundle_lock
+ cmpl %r14d, %ebp
+ jle .L_ELSE
+ .bundle_unlock
+# CHECK: .bundle_unlock
+ .bundle_lock align_to_end
+# CHECK: .bundle_lock align_to_end
+ add %rbx, %rdx
+ .bundle_unlock
+
+
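(Editorial aside, not part of this commit: a minimal hand-written sketch of what the directives above do at the object level, using the same llvm-mc/llvm-objdump/FileCheck pipeline that appears in the RUN lines of these tests. With .bundle_align_mode 4, bundles are 16 bytes, and a group locked with align_to_end is padded so that it finishes exactly on a bundle boundary; the auto-generated tests in the next two files verify this exhaustively for every instruction length and starting offset.)

# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \
# RUN:   | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
        .text
        .bundle_align_mode 4            # bundle size is 2^4 = 16 bytes
        .align  16, 0x90
        .bundle_lock align_to_end       # the locked group must end on a bundle boundary
        inc     %eax                    # one 1-byte instruction, so 15 bytes of nop padding precede it
        .bundle_unlock
# CHECK: 0: nop
# CHECK: f: incl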
diff --git a/test/MC/X86/AlignedBundling/autogen-inst-offset-align-to-end.s b/test/MC/X86/AlignedBundling/autogen-inst-offset-align-to-end.s
new file mode 100644
index 0000000000000..fbf5b5294460f
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/autogen-inst-offset-align-to-end.s
@@ -0,0 +1,2899 @@
+# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
+
+# !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!!
+# It tests that bundle-aligned grouping works correctly in MC. Read the
+# source of the script for more details.
+
+ .text
+ .bundle_align_mode 4
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 0: nop
+# CHECK: f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 21: nop
+# CHECK: 2f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 42: nop
+# CHECK: 4f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 63: nop
+# CHECK: 6f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 84: nop
+# CHECK: 8f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a5: nop
+# CHECK: af: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c6: nop
+# CHECK: cf: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e7: nop
+# CHECK: ef: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 108: nop
+# CHECK: 10f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 129: nop
+# CHECK: 12f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14a: nop
+# CHECK: 14f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16b: nop
+# CHECK: 16f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18c: nop
+# CHECK: 18f: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ad: nop
+# CHECK: 1af: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ce: nop
+# CHECK: 1cf: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ef: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 200: nop
+# CHECK: 20e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 221: nop
+# CHECK: 22e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 242: nop
+# CHECK: 24e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 263: nop
+# CHECK: 26e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 284: nop
+# CHECK: 28e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 2a5: nop
+# CHECK: 2ae: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 2c6: nop
+# CHECK: 2ce: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 2e7: nop
+# CHECK: 2ee: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 308: nop
+# CHECK: 30e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 329: nop
+# CHECK: 32e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 34a: nop
+# CHECK: 34e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 36b: nop
+# CHECK: 36e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 38c: nop
+# CHECK: 38e: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 3ad: nop
+# CHECK: 3ae: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 3ce: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 3ef: nop
+# CHECK: 3f0: nop
+# CHECK: 3fe: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 400: nop
+# CHECK: 40d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 421: nop
+# CHECK: 42d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 442: nop
+# CHECK: 44d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 463: nop
+# CHECK: 46d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 484: nop
+# CHECK: 48d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 4a5: nop
+# CHECK: 4ad: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 4c6: nop
+# CHECK: 4cd: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 4e7: nop
+# CHECK: 4ed: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 508: nop
+# CHECK: 50d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 529: nop
+# CHECK: 52d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 54a: nop
+# CHECK: 54d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 56b: nop
+# CHECK: 56d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 58c: nop
+# CHECK: 58d: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 5ad: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 5ce: nop
+# CHECK: 5d0: nop
+# CHECK: 5dd: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 5ef: nop
+# CHECK: 5f0: nop
+# CHECK: 5fd: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 600: nop
+# CHECK: 60c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 621: nop
+# CHECK: 62c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 642: nop
+# CHECK: 64c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 663: nop
+# CHECK: 66c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 684: nop
+# CHECK: 68c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 6a5: nop
+# CHECK: 6ac: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 6c6: nop
+# CHECK: 6cc: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 6e7: nop
+# CHECK: 6ec: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 708: nop
+# CHECK: 70c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 729: nop
+# CHECK: 72c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 74a: nop
+# CHECK: 74c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 76b: nop
+# CHECK: 76c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 78c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 7ad: nop
+# CHECK: 7b0: nop
+# CHECK: 7bc: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 7ce: nop
+# CHECK: 7d0: nop
+# CHECK: 7dc: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 7ef: nop
+# CHECK: 7f0: nop
+# CHECK: 7fc: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 800: nop
+# CHECK: 80b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 821: nop
+# CHECK: 82b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 842: nop
+# CHECK: 84b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 863: nop
+# CHECK: 86b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 884: nop
+# CHECK: 88b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 8a5: nop
+# CHECK: 8ab: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 8c6: nop
+# CHECK: 8cb: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 8e7: nop
+# CHECK: 8eb: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 908: nop
+# CHECK: 90b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 929: nop
+# CHECK: 92b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 94a: nop
+# CHECK: 94b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 96b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 98c: nop
+# CHECK: 990: nop
+# CHECK: 99b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 9ad: nop
+# CHECK: 9b0: nop
+# CHECK: 9bb: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 9ce: nop
+# CHECK: 9d0: nop
+# CHECK: 9db: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 9ef: nop
+# CHECK: 9f0: nop
+# CHECK: 9fb: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a00: nop
+# CHECK: a0a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a21: nop
+# CHECK: a2a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a42: nop
+# CHECK: a4a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a63: nop
+# CHECK: a6a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a84: nop
+# CHECK: a8a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: aa5: nop
+# CHECK: aaa: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ac6: nop
+# CHECK: aca: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ae7: nop
+# CHECK: aea: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b08: nop
+# CHECK: b0a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b29: nop
+# CHECK: b2a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b4a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b6b: nop
+# CHECK: b70: nop
+# CHECK: b7a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b8c: nop
+# CHECK: b90: nop
+# CHECK: b9a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: bad: nop
+# CHECK: bb0: nop
+# CHECK: bba: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: bce: nop
+# CHECK: bd0: nop
+# CHECK: bda: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: bef: nop
+# CHECK: bf0: nop
+# CHECK: bfa: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c00: nop
+# CHECK: c09: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c21: nop
+# CHECK: c29: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c42: nop
+# CHECK: c49: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c63: nop
+# CHECK: c69: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c84: nop
+# CHECK: c89: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ca5: nop
+# CHECK: ca9: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: cc6: nop
+# CHECK: cc9: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ce7: nop
+# CHECK: ce9: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d08: nop
+# CHECK: d09: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d29: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d4a: nop
+# CHECK: d50: nop
+# CHECK: d59: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d6b: nop
+# CHECK: d70: nop
+# CHECK: d79: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d8c: nop
+# CHECK: d90: nop
+# CHECK: d99: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: dad: nop
+# CHECK: db0: nop
+# CHECK: db9: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: dce: nop
+# CHECK: dd0: nop
+# CHECK: dd9: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: def: nop
+# CHECK: df0: nop
+# CHECK: df9: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e00: nop
+# CHECK: e08: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e21: nop
+# CHECK: e28: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e42: nop
+# CHECK: e48: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e63: nop
+# CHECK: e68: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e84: nop
+# CHECK: e88: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ea5: nop
+# CHECK: ea8: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ec6: nop
+# CHECK: ec8: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ee7: nop
+# CHECK: ee8: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f08: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f29: nop
+# CHECK: f30: nop
+# CHECK: f38: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f4a: nop
+# CHECK: f50: nop
+# CHECK: f58: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f6b: nop
+# CHECK: f70: nop
+# CHECK: f78: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f8c: nop
+# CHECK: f90: nop
+# CHECK: f98: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: fad: nop
+# CHECK: fb0: nop
+# CHECK: fb8: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: fce: nop
+# CHECK: fd0: nop
+# CHECK: fd8: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: fef: nop
+# CHECK: ff0: nop
+# CHECK: ff8: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1000: nop
+# CHECK: 1007: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1021: nop
+# CHECK: 1027: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1042: nop
+# CHECK: 1047: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1063: nop
+# CHECK: 1067: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1084: nop
+# CHECK: 1087: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 10a5: nop
+# CHECK: 10a7: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 10c6: nop
+# CHECK: 10c7: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 10e7: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1108: nop
+# CHECK: 1110: nop
+# CHECK: 1117: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1129: nop
+# CHECK: 1130: nop
+# CHECK: 1137: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 114a: nop
+# CHECK: 1150: nop
+# CHECK: 1157: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 116b: nop
+# CHECK: 1170: nop
+# CHECK: 1177: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 118c: nop
+# CHECK: 1190: nop
+# CHECK: 1197: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 11ad: nop
+# CHECK: 11b0: nop
+# CHECK: 11b7: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 11ce: nop
+# CHECK: 11d0: nop
+# CHECK: 11d7: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 11ef: nop
+# CHECK: 11f0: nop
+# CHECK: 11f7: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1200: nop
+# CHECK: 1206: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1221: nop
+# CHECK: 1226: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1242: nop
+# CHECK: 1246: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1263: nop
+# CHECK: 1266: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1284: nop
+# CHECK: 1286: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 12a5: nop
+# CHECK: 12a6: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 12c6: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 12e7: nop
+# CHECK: 12f0: nop
+# CHECK: 12f6: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1308: nop
+# CHECK: 1310: nop
+# CHECK: 1316: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1329: nop
+# CHECK: 1330: nop
+# CHECK: 1336: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 134a: nop
+# CHECK: 1350: nop
+# CHECK: 1356: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 136b: nop
+# CHECK: 1370: nop
+# CHECK: 1376: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 138c: nop
+# CHECK: 1390: nop
+# CHECK: 1396: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 13ad: nop
+# CHECK: 13b0: nop
+# CHECK: 13b6: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 13ce: nop
+# CHECK: 13d0: nop
+# CHECK: 13d6: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 13ef: nop
+# CHECK: 13f0: nop
+# CHECK: 13f6: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1400: nop
+# CHECK: 1405: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1421: nop
+# CHECK: 1425: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1442: nop
+# CHECK: 1445: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1463: nop
+# CHECK: 1465: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1484: nop
+# CHECK: 1485: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14a5: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14c6: nop
+# CHECK: 14d0: nop
+# CHECK: 14d5: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14e7: nop
+# CHECK: 14f0: nop
+# CHECK: 14f5: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1508: nop
+# CHECK: 1510: nop
+# CHECK: 1515: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1529: nop
+# CHECK: 1530: nop
+# CHECK: 1535: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 154a: nop
+# CHECK: 1550: nop
+# CHECK: 1555: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 156b: nop
+# CHECK: 1570: nop
+# CHECK: 1575: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 158c: nop
+# CHECK: 1590: nop
+# CHECK: 1595: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 15ad: nop
+# CHECK: 15b0: nop
+# CHECK: 15b5: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 15ce: nop
+# CHECK: 15d0: nop
+# CHECK: 15d5: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 15ef: nop
+# CHECK: 15f0: nop
+# CHECK: 15f5: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1600: nop
+# CHECK: 1604: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1621: nop
+# CHECK: 1624: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1642: nop
+# CHECK: 1644: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1663: nop
+# CHECK: 1664: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1684: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16a5: nop
+# CHECK: 16b0: nop
+# CHECK: 16b4: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16c6: nop
+# CHECK: 16d0: nop
+# CHECK: 16d4: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16e7: nop
+# CHECK: 16f0: nop
+# CHECK: 16f4: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1708: nop
+# CHECK: 1710: nop
+# CHECK: 1714: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1729: nop
+# CHECK: 1730: nop
+# CHECK: 1734: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 174a: nop
+# CHECK: 1750: nop
+# CHECK: 1754: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 176b: nop
+# CHECK: 1770: nop
+# CHECK: 1774: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 178c: nop
+# CHECK: 1790: nop
+# CHECK: 1794: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 17ad: nop
+# CHECK: 17b0: nop
+# CHECK: 17b4: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 17ce: nop
+# CHECK: 17d0: nop
+# CHECK: 17d4: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 17ef: nop
+# CHECK: 17f0: nop
+# CHECK: 17f4: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1800: nop
+# CHECK: 1803: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1821: nop
+# CHECK: 1823: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1842: nop
+# CHECK: 1843: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1863: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1884: nop
+# CHECK: 1890: nop
+# CHECK: 1893: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18a5: nop
+# CHECK: 18b0: nop
+# CHECK: 18b3: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18c6: nop
+# CHECK: 18d0: nop
+# CHECK: 18d3: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18e7: nop
+# CHECK: 18f0: nop
+# CHECK: 18f3: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1908: nop
+# CHECK: 1910: nop
+# CHECK: 1913: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1929: nop
+# CHECK: 1930: nop
+# CHECK: 1933: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 194a: nop
+# CHECK: 1950: nop
+# CHECK: 1953: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 196b: nop
+# CHECK: 1970: nop
+# CHECK: 1973: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 198c: nop
+# CHECK: 1990: nop
+# CHECK: 1993: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 19ad: nop
+# CHECK: 19b0: nop
+# CHECK: 19b3: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 19ce: nop
+# CHECK: 19d0: nop
+# CHECK: 19d3: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 19ef: nop
+# CHECK: 19f0: nop
+# CHECK: 19f3: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a00: nop
+# CHECK: 1a02: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a21: nop
+# CHECK: 1a22: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a42: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a63: nop
+# CHECK: 1a70: nop
+# CHECK: 1a72: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a84: nop
+# CHECK: 1a90: nop
+# CHECK: 1a92: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1aa5: nop
+# CHECK: 1ab0: nop
+# CHECK: 1ab2: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ac6: nop
+# CHECK: 1ad0: nop
+# CHECK: 1ad2: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ae7: nop
+# CHECK: 1af0: nop
+# CHECK: 1af2: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b08: nop
+# CHECK: 1b10: nop
+# CHECK: 1b12: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b29: nop
+# CHECK: 1b30: nop
+# CHECK: 1b32: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b4a: nop
+# CHECK: 1b50: nop
+# CHECK: 1b52: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b6b: nop
+# CHECK: 1b70: nop
+# CHECK: 1b72: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b8c: nop
+# CHECK: 1b90: nop
+# CHECK: 1b92: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1bad: nop
+# CHECK: 1bb0: nop
+# CHECK: 1bb2: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1bce: nop
+# CHECK: 1bd0: nop
+# CHECK: 1bd2: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1bef: nop
+# CHECK: 1bf0: nop
+# CHECK: 1bf2: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c00: nop
+# CHECK: 1c01: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c21: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c42: nop
+# CHECK: 1c50: nop
+# CHECK: 1c51: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c63: nop
+# CHECK: 1c70: nop
+# CHECK: 1c71: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c84: nop
+# CHECK: 1c90: nop
+# CHECK: 1c91: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ca5: nop
+# CHECK: 1cb0: nop
+# CHECK: 1cb1: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1cc6: nop
+# CHECK: 1cd0: nop
+# CHECK: 1cd1: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ce7: nop
+# CHECK: 1cf0: nop
+# CHECK: 1cf1: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d08: nop
+# CHECK: 1d10: nop
+# CHECK: 1d11: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d29: nop
+# CHECK: 1d30: nop
+# CHECK: 1d31: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d4a: nop
+# CHECK: 1d50: nop
+# CHECK: 1d51: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d6b: nop
+# CHECK: 1d70: nop
+# CHECK: 1d71: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d8c: nop
+# CHECK: 1d90: nop
+# CHECK: 1d91: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1dad: nop
+# CHECK: 1db0: nop
+# CHECK: 1db1: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1dce: nop
+# CHECK: 1dd0: nop
+# CHECK: 1dd1: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1def: nop
+# CHECK: 1df0: nop
+# CHECK: 1df1: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_0:
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e00: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e21: nop
+# CHECK: 1e30: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e42: nop
+# CHECK: 1e50: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e63: nop
+# CHECK: 1e70: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e84: nop
+# CHECK: 1e90: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ea5: nop
+# CHECK: 1eb0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ec6: nop
+# CHECK: 1ed0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ee7: nop
+# CHECK: 1ef0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f08: nop
+# CHECK: 1f10: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f29: nop
+# CHECK: 1f30: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f4a: nop
+# CHECK: 1f50: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f6b: nop
+# CHECK: 1f70: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f8c: nop
+# CHECK: 1f90: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1fad: nop
+# CHECK: 1fb0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1fce: nop
+# CHECK: 1fd0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock align_to_end
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1fef: nop
+# CHECK: 1ff0: incl
+
diff --git a/test/MC/X86/AlignedBundling/autogen-inst-offset-padding.s b/test/MC/X86/AlignedBundling/autogen-inst-offset-padding.s
new file mode 100644
index 0000000000000..12786b34af72d
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/autogen-inst-offset-padding.s
@@ -0,0 +1,2674 @@
+# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
+
+# !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!!
+# It tests that bundle-aligned grouping works correctly in MC. Read the
+# source of the script for more details.
+
+ .text
+ .bundle_align_mode 4
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_0:
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 0: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 21: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 42: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 63: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 84: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a5: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c6: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e7: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 108: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 129: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14a: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16b: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18c: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ad: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ce: incl
+
+ .align 32, 0x90
+INSTRLEN_1_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 1
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ef: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_0:
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 200: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 221: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 242: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 263: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 284: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 2a5: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 2c6: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 2e7: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 308: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 329: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 34a: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 36b: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 38c: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 3ad: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 3ce: incl
+
+ .align 32, 0x90
+INSTRLEN_2_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 2
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 3ef: nop
+# CHECK: 3f0: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_0:
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 400: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 421: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 442: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 463: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 484: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 4a5: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 4c6: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 4e7: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 508: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 529: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 54a: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 56b: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 58c: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 5ad: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 5ce: nop
+# CHECK: 5d0: incl
+
+ .align 32, 0x90
+INSTRLEN_3_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 3
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 5ef: nop
+# CHECK: 5f0: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_0:
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 600: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 621: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 642: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 663: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 684: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 6a5: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 6c6: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 6e7: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 708: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 729: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 74a: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 76b: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 78c: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 7ad: nop
+# CHECK: 7b0: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 7ce: nop
+# CHECK: 7d0: incl
+
+ .align 32, 0x90
+INSTRLEN_4_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 4
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 7ef: nop
+# CHECK: 7f0: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_0:
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 800: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 821: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 842: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 863: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 884: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 8a5: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 8c6: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 8e7: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 908: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 929: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 94a: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 96b: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 98c: nop
+# CHECK: 990: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 9ad: nop
+# CHECK: 9b0: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 9ce: nop
+# CHECK: 9d0: incl
+
+ .align 32, 0x90
+INSTRLEN_5_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 5
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 9ef: nop
+# CHECK: 9f0: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_0:
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a00: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a21: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a42: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a63: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: a84: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: aa5: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ac6: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ae7: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b08: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b29: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b4a: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b6b: nop
+# CHECK: b70: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: b8c: nop
+# CHECK: b90: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: bad: nop
+# CHECK: bb0: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: bce: nop
+# CHECK: bd0: incl
+
+ .align 32, 0x90
+INSTRLEN_6_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 6
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: bef: nop
+# CHECK: bf0: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_0:
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c00: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c21: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c42: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c63: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: c84: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ca5: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: cc6: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ce7: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d08: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d29: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d4a: nop
+# CHECK: d50: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d6b: nop
+# CHECK: d70: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: d8c: nop
+# CHECK: d90: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: dad: nop
+# CHECK: db0: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: dce: nop
+# CHECK: dd0: incl
+
+ .align 32, 0x90
+INSTRLEN_7_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 7
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: def: nop
+# CHECK: df0: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_0:
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e00: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e21: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e42: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e63: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: e84: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ea5: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ec6: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: ee7: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f08: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f29: nop
+# CHECK: f30: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f4a: nop
+# CHECK: f50: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f6b: nop
+# CHECK: f70: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: f8c: nop
+# CHECK: f90: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: fad: nop
+# CHECK: fb0: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: fce: nop
+# CHECK: fd0: incl
+
+ .align 32, 0x90
+INSTRLEN_8_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 8
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: fef: nop
+# CHECK: ff0: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_0:
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1000: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1021: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1042: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1063: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1084: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 10a5: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 10c6: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 10e7: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1108: nop
+# CHECK: 1110: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1129: nop
+# CHECK: 1130: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 114a: nop
+# CHECK: 1150: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 116b: nop
+# CHECK: 1170: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 118c: nop
+# CHECK: 1190: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 11ad: nop
+# CHECK: 11b0: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 11ce: nop
+# CHECK: 11d0: incl
+
+ .align 32, 0x90
+INSTRLEN_9_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 9
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 11ef: nop
+# CHECK: 11f0: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_0:
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1200: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1221: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1242: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1263: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1284: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 12a5: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 12c6: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 12e7: nop
+# CHECK: 12f0: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1308: nop
+# CHECK: 1310: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1329: nop
+# CHECK: 1330: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 134a: nop
+# CHECK: 1350: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 136b: nop
+# CHECK: 1370: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 138c: nop
+# CHECK: 1390: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 13ad: nop
+# CHECK: 13b0: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 13ce: nop
+# CHECK: 13d0: incl
+
+ .align 32, 0x90
+INSTRLEN_10_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 10
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 13ef: nop
+# CHECK: 13f0: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_0:
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1400: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1421: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1442: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1463: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1484: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14a5: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14c6: nop
+# CHECK: 14d0: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 14e7: nop
+# CHECK: 14f0: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1508: nop
+# CHECK: 1510: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1529: nop
+# CHECK: 1530: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 154a: nop
+# CHECK: 1550: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 156b: nop
+# CHECK: 1570: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 158c: nop
+# CHECK: 1590: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 15ad: nop
+# CHECK: 15b0: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 15ce: nop
+# CHECK: 15d0: incl
+
+ .align 32, 0x90
+INSTRLEN_11_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 11
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 15ef: nop
+# CHECK: 15f0: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_0:
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1600: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1621: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1642: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1663: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1684: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16a5: nop
+# CHECK: 16b0: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16c6: nop
+# CHECK: 16d0: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 16e7: nop
+# CHECK: 16f0: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1708: nop
+# CHECK: 1710: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1729: nop
+# CHECK: 1730: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 174a: nop
+# CHECK: 1750: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 176b: nop
+# CHECK: 1770: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 178c: nop
+# CHECK: 1790: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 17ad: nop
+# CHECK: 17b0: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 17ce: nop
+# CHECK: 17d0: incl
+
+ .align 32, 0x90
+INSTRLEN_12_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 12
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 17ef: nop
+# CHECK: 17f0: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_0:
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1800: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1821: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1842: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1863: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1884: nop
+# CHECK: 1890: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18a5: nop
+# CHECK: 18b0: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18c6: nop
+# CHECK: 18d0: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 18e7: nop
+# CHECK: 18f0: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1908: nop
+# CHECK: 1910: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1929: nop
+# CHECK: 1930: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 194a: nop
+# CHECK: 1950: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 196b: nop
+# CHECK: 1970: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 198c: nop
+# CHECK: 1990: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 19ad: nop
+# CHECK: 19b0: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 19ce: nop
+# CHECK: 19d0: incl
+
+ .align 32, 0x90
+INSTRLEN_13_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 13
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 19ef: nop
+# CHECK: 19f0: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_0:
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a00: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a21: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a42: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a63: nop
+# CHECK: 1a70: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1a84: nop
+# CHECK: 1a90: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1aa5: nop
+# CHECK: 1ab0: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ac6: nop
+# CHECK: 1ad0: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ae7: nop
+# CHECK: 1af0: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b08: nop
+# CHECK: 1b10: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b29: nop
+# CHECK: 1b30: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b4a: nop
+# CHECK: 1b50: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b6b: nop
+# CHECK: 1b70: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1b8c: nop
+# CHECK: 1b90: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1bad: nop
+# CHECK: 1bb0: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1bce: nop
+# CHECK: 1bd0: incl
+
+ .align 32, 0x90
+INSTRLEN_14_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 14
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1bef: nop
+# CHECK: 1bf0: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_0:
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c00: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c21: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c42: nop
+# CHECK: 1c50: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c63: nop
+# CHECK: 1c70: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1c84: nop
+# CHECK: 1c90: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ca5: nop
+# CHECK: 1cb0: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1cc6: nop
+# CHECK: 1cd0: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ce7: nop
+# CHECK: 1cf0: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d08: nop
+# CHECK: 1d10: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d29: nop
+# CHECK: 1d30: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d4a: nop
+# CHECK: 1d50: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d6b: nop
+# CHECK: 1d70: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1d8c: nop
+# CHECK: 1d90: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1dad: nop
+# CHECK: 1db0: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1dce: nop
+# CHECK: 1dd0: incl
+
+ .align 32, 0x90
+INSTRLEN_15_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 15
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1def: nop
+# CHECK: 1df0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_0:
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e00: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_1:
+ .fill 1, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e21: nop
+# CHECK: 1e30: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_2:
+ .fill 2, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e42: nop
+# CHECK: 1e50: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_3:
+ .fill 3, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e63: nop
+# CHECK: 1e70: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_4:
+ .fill 4, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1e84: nop
+# CHECK: 1e90: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_5:
+ .fill 5, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ea5: nop
+# CHECK: 1eb0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_6:
+ .fill 6, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ec6: nop
+# CHECK: 1ed0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_7:
+ .fill 7, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1ee7: nop
+# CHECK: 1ef0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_8:
+ .fill 8, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f08: nop
+# CHECK: 1f10: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_9:
+ .fill 9, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f29: nop
+# CHECK: 1f30: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_10:
+ .fill 10, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f4a: nop
+# CHECK: 1f50: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_11:
+ .fill 11, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f6b: nop
+# CHECK: 1f70: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_12:
+ .fill 12, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1f8c: nop
+# CHECK: 1f90: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_13:
+ .fill 13, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1fad: nop
+# CHECK: 1fb0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_14:
+ .fill 14, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1fce: nop
+# CHECK: 1fd0: incl
+
+ .align 32, 0x90
+INSTRLEN_16_OFFSET_15:
+ .fill 15, 1, 0x90
+ .bundle_lock
+ .rept 16
+ inc %eax
+ .endr
+ .bundle_unlock
+# CHECK: 1fef: nop
+# CHECK: 1ff0: incl
+
diff --git a/test/MC/X86/AlignedBundling/bundle-group-too-large-error.s b/test/MC/X86/AlignedBundling/bundle-group-too-large-error.s
new file mode 100644
index 0000000000000..722bf7b9227f5
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/bundle-group-too-large-error.s
@@ -0,0 +1,17 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# CHECK: ERROR: Fragment can't be larger than a bundle size
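+# (For reference: .bundle_align_mode 4 gives 2^4 = 16-byte bundles, while the
+# locked group below is pushq %r14 (2 bytes) plus four 5-byte callq
+# instructions = 22 bytes, so it cannot fit into a single bundle.)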
+
+ .text
+foo:
+ .bundle_align_mode 4
+ pushq %rbp
+
+ .bundle_lock
+ pushq %r14
+ callq bar
+ callq bar
+ callq bar
+ callq bar
+ .bundle_unlock
+
diff --git a/test/MC/X86/AlignedBundling/bundle-lock-option-error.s b/test/MC/X86/AlignedBundling/bundle-lock-option-error.s
new file mode 100644
index 0000000000000..82c5d7cf0e7b0
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/bundle-lock-option-error.s
@@ -0,0 +1,11 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# Invalid .bundle_lock option
+# CHECK: error: invalid option
+
+ .bundle_align_mode 4
+ .bundle_lock 5
+ imull $17, %ebx, %ebp
+ .bundle_unlock
+
+
diff --git a/test/MC/X86/AlignedBundling/different-sections.s b/test/MC/X86/AlignedBundling/different-sections.s
new file mode 100644
index 0000000000000..3e9fcf376d2d6
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/different-sections.s
@@ -0,0 +1,25 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+
+# Test two different executable sections with bundling.
+
+ .bundle_align_mode 3
+ .section text1, "x"
+# CHECK: section text1
+ imull $17, %ebx, %ebp
+ imull $17, %ebx, %ebp
+
+ imull $17, %ebx, %ebp
+# CHECK: 6: nop
+# CHECK-NEXT: 8: imull
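+# (Each imull $17, %ebx, %ebp is 3 bytes; with 8-byte bundles the third imull
+# would start at offset 6 and cross the boundary, so 2 NOP bytes push it to 8.
+# The same padding happens independently in text2 below.)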
+
+ .section text2, "x"
+# CHECK: section text2
+ imull $17, %ebx, %ebp
+ imull $17, %ebx, %ebp
+
+ imull $17, %ebx, %ebp
+# CHECK: 6: nop
+# CHECK-NEXT: 8: imull
+
+
diff --git a/test/MC/X86/AlignedBundling/lit.local.cfg b/test/MC/X86/AlignedBundling/lit.local.cfg
new file mode 100644
index 0000000000000..6c49f08b7496d
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.s']
+
+targets = set(config.root.targets_to_build.split())
+if not 'X86' in targets:
+ config.unsupported = True
+
diff --git a/test/MC/X86/AlignedBundling/lock-without-bundle-mode-error.s b/test/MC/X86/AlignedBundling/lock-without-bundle-mode-error.s
new file mode 100644
index 0000000000000..d45a9b4a5dfb0
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/lock-without-bundle-mode-error.s
@@ -0,0 +1,10 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# .bundle_lock can't come without a .bundle_align_mode before it
+
+# CHECK: ERROR: .bundle_lock forbidden when bundling is disabled
+
+ imull $17, %ebx, %ebp
+ .bundle_lock
+
+
diff --git a/test/MC/X86/AlignedBundling/long-nop-pad.s b/test/MC/X86/AlignedBundling/long-nop-pad.s
new file mode 100644
index 0000000000000..ea33e2889b9ea
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/long-nop-pad.s
@@ -0,0 +1,27 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+
+# Test that long nops are generated for padding where possible.
+
+ .text
+foo:
+ .bundle_align_mode 5
+
+# This callq instruction is 5 bytes long
+ .bundle_lock align_to_end
+ callq bar
+ .bundle_unlock
+# To align this group to a bundle end, we need a 15-byte NOP and a 12-byte NOP.
+# CHECK: 0: nop
+# CHECK-NEXT: f: nop
+# CHECK-NEXT: 1b: callq
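+# (The 5-byte callq has to end exactly at the 32-byte boundary 0x20, so it
+# starts at 0x1b; the 27 bytes before it are covered by a 15-byte and a
+# 12-byte NOP.)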
+
+# This push instruction is 1 byte long
+ .bundle_lock align_to_end
+ push %rax
+ .bundle_unlock
+# To align this group to a bundle end, we need two 15-byte NOPs and a 1-byte NOP.
+# CHECK: 20: nop
+# CHECK-NEXT: 2f: nop
+# CHECK-NEXT: 3e: nop
+# CHECK-NEXT: 3f: pushq
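+# (The push must end at the boundary 0x40, so it lands at 0x3f; the 31 bytes
+# from 0x20 are filled with 15 + 15 + 1 bytes of NOPs.)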
diff --git a/test/MC/X86/AlignedBundling/pad-align-to-bundle-end.s b/test/MC/X86/AlignedBundling/pad-align-to-bundle-end.s
new file mode 100644
index 0000000000000..6ca4046f0c7b9
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/pad-align-to-bundle-end.s
@@ -0,0 +1,33 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+
+# Test some variations of padding to the end of a bundle.
+
+ .text
+foo:
+ .bundle_align_mode 4
+
+# Each of these callq instructions is 5 bytes long
+ callq bar
+ callq bar
+ .bundle_lock align_to_end
+ callq bar
+ .bundle_unlock
+# To align this group to a bundle end, we need a 1-byte NOP.
+# CHECK: a: nop
+# CHECK-NEXT: b: callq
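+# (The two unlocked callq instructions occupy bytes 0-0x9; the locked callq
+# must end at the 16-byte boundary 0x10, so it starts at 0xb and a single
+# NOP byte fills offset 0xa.)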
+
+ callq bar
+ callq bar
+ .bundle_lock align_to_end
+ callq bar
+ callq bar
+ .bundle_unlock
+# Here we have to pad until the end of the *next* bundle, because otherwise
+# the group would cross a bundle boundary.
+# CHECK: 1a: nop
+# The nop sequence may be implemented as one instruction or many, but if
+# it's one instruction, that instruction cannot itself cross the boundary.
+# CHECK: 20: nop
+# CHECK-NEXT: 26: callq
+# CHECK-NEXT: 2b: callq
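+# (The locked pair is 10 bytes; ending it at 0x20 would mean starting at 0x16,
+# but the code above already reaches 0x1a, so it is pushed out to end at 0x30,
+# starting at 0x26. The 12 padding bytes are split 6 + 6 so that no single NOP
+# crosses the boundary at 0x20.)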
diff --git a/test/MC/X86/AlignedBundling/pad-bundle-groups.s b/test/MC/X86/AlignedBundling/pad-bundle-groups.s
new file mode 100644
index 0000000000000..b65ee7a5cc745
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/pad-bundle-groups.s
@@ -0,0 +1,46 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+
+# Test some variations of padding for bundle-locked groups.
+
+ .text
+foo:
+ .bundle_align_mode 4
+
+# Each of these callq instructions is 5 bytes long
+ callq bar
+ callq bar
+
+ .bundle_lock
+ callq bar
+ callq bar
+ .bundle_unlock
+# We'll need a 6-byte NOP before this group
+# CHECK: a: nop
+# CHECK-NEXT: 10: callq
+# CHECK-NEXT: 15: callq
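+# (The two unlocked callq instructions end at 0xa; the locked 10-byte pair
+# starting there would cross the boundary at 0x10, hence 6 bytes of NOP.)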
+
+ .bundle_lock
+ callq bar
+ callq bar
+ .bundle_unlock
+# Same here
+# CHECK: 1a: nop
+# CHECK-NEXT: 20: callq
+# CHECK-NEXT: 25: callq
+
+ .align 16, 0x90
+ callq bar
+ .bundle_lock
+ callq bar
+ callq bar
+ callq bar
+ .bundle_unlock
+# And here we'll need an 11-byte NOP
+# CHECK: 30: callq
+# CHECK: 35: nop
+# CHECK-NEXT: 40: callq
+# CHECK-NEXT: 45: callq
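+# (After the .align, the unlocked callq sits at 0x30-0x34; the locked group of
+# three callq instructions is 15 bytes and would cross 0x40 if started at 0x35,
+# so 11 bytes of NOP push it to 0x40.)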
+
+
+
diff --git a/test/MC/X86/AlignedBundling/relax-at-bundle-end.s b/test/MC/X86/AlignedBundling/relax-at-bundle-end.s
new file mode 100644
index 0000000000000..ab4affbbeac8f
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/relax-at-bundle-end.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+
+# Test that an instruction near a bundle end gets properly padded
+# after it is relaxed.
+.text
+foo:
+ .bundle_align_mode 5
+ .rept 29
+ push %rax
+ .endr
+# CHECK: 1c: push
+# CHECK: 1d: nop
+# CHECK: 20: jne
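+# (The 29 one-byte pushes end at 0x1d; once relaxed to its 6-byte rel32 form,
+# the jne placed there would cross the bundle boundary at 0x20, so 3 bytes of
+# NOP padding move it to 0x20.)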
+ jne 0x100
+
diff --git a/test/MC/X86/AlignedBundling/relax-in-bundle-group.s b/test/MC/X86/AlignedBundling/relax-in-bundle-group.s
new file mode 100644
index 0000000000000..0a99bb5ce5637
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/relax-in-bundle-group.s
@@ -0,0 +1,42 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble - | FileCheck %s
+
+# Test that instructions inside bundle-locked groups are relaxed even if their
+# fixup is short enough not to warrant relaxation on its own.
+
+ .text
+foo:
+ .bundle_align_mode 4
+ pushq %rbp
+
+ movl %edi, %ebx
+ callq bar
+ movl %eax, %r14d
+ imull $17, %ebx, %ebp
+ movl %ebx, %edi
+ callq bar
+ cmpl %r14d, %ebp
+ .bundle_lock
+
+ jle .L_ELSE
+# This group would've started at 0x18 and is too long, so a chunky NOP padding
+# is inserted to push it to 0x20.
+# CHECK: 18: {{[a-f0-9 ]+}} nopl
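+# (The relaxed jle (6 bytes), the addl (2 bytes) and the relaxed jmp (5 bytes)
+# form a 13-byte group; starting at 0x18 it would end at 0x25 and cross the
+# boundary at 0x20, so 8 bytes of padding are inserted.)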
+
+# The long encoding for JLE should be used here even though its target is close
+# CHECK-NEXT: 20: 0f 8e
+
+ addl %ebp, %eax
+
+ jmp .L_RET
+# Same for the JMP
+# CHECK: 28: e9
+
+ .bundle_unlock
+
+.L_ELSE:
+ imull %ebx, %eax
+.L_RET:
+
+ popq %rbx
+
diff --git a/test/MC/X86/AlignedBundling/single-inst-bundling.s b/test/MC/X86/AlignedBundling/single-inst-bundling.s
new file mode 100644
index 0000000000000..c0275f4d1ecbc
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/single-inst-bundling.s
@@ -0,0 +1,47 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - \
+# RUN: | llvm-objdump -disassemble -no-show-raw-insn - | FileCheck %s
+
+# Test simple NOP insertion for single instructions.
+
+ .text
+foo:
+ # Will be bundle-aligning to 16 byte boundaries
+ .bundle_align_mode 4
+ pushq %rbp
+ pushq %r14
+ pushq %rbx
+
+ movl %edi, %ebx
+ callq bar
+ movl %eax, %r14d
+
+ imull $17, %ebx, %ebp
+# This imull is 3 bytes long and should have started at 0xe, so two bytes
+# of nop padding are inserted instead and it starts at 0x10
+# CHECK: nop
+# CHECK-NEXT: 10: imull
+
+ movl %ebx, %edi
+ callq bar
+ cmpl %r14d, %ebp
+ jle .L_ELSE
+# Because of the NOP byte inserted before the addl below, the jump target ends
+# up one byte farther away.
+# CHECK: jle 5
+
+ addl %ebp, %eax
+# CHECK: nop
+# CHECK-NEXT: 20: addl
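+# (The 2-byte addl would otherwise start at 0x1f and straddle the boundary at
+# 0x20, so a single NOP byte is inserted first.)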
+
+ jmp .L_RET
+.L_ELSE:
+ imull %ebx, %eax
+.L_RET:
+ ret
+
+# Just sanity checking that data fills don't drive bundling crazy
+ .data
+ .byte 40
+ .byte 98
+
+
diff --git a/test/MC/X86/AlignedBundling/switch-section-locked-error.s b/test/MC/X86/AlignedBundling/switch-section-locked-error.s
new file mode 100644
index 0000000000000..af41e19212527
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/switch-section-locked-error.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# This test invokes .bundle_lock and then switches to a different section
+# without the matching .bundle_unlock.
+
+# CHECK: ERROR: Unterminated .bundle_lock
+
+ .bundle_align_mode 3
+ .section text1, "x"
+ imull $17, %ebx, %ebp
+ .bundle_lock
+ imull $17, %ebx, %ebp
+
+ .section text2, "x"
+ imull $17, %ebx, %ebp
+
diff --git a/test/MC/X86/AlignedBundling/unlock-without-lock-error.s b/test/MC/X86/AlignedBundling/unlock-without-lock-error.s
new file mode 100644
index 0000000000000..699511d4e6b68
--- /dev/null
+++ b/test/MC/X86/AlignedBundling/unlock-without-lock-error.s
@@ -0,0 +1,11 @@
+# RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+# .bundle_unlock can't come without a .bundle_lock before it
+
+# CHECK: ERROR: .bundle_unlock without matching lock
+
+ .bundle_align_mode 3
+ imull $17, %ebx, %ebp
+ .bundle_unlock
+
+
diff --git a/test/MC/X86/fde-reloc.s b/test/MC/X86/fde-reloc.s
new file mode 100644
index 0000000000000..63ac976621884
--- /dev/null
+++ b/test/MC/X86/fde-reloc.s
@@ -0,0 +1,11 @@
+// RUN: llvm-mc -filetype=obj %s -o - -triple x86_64-pc-linux | llvm-objdump -r - | FileCheck --check-prefix=X86-64 %s
+// RUN: llvm-mc -filetype=obj %s -o - -triple i686-pc-linux | llvm-objdump -r - | FileCheck --check-prefix=I686 %s
+
+// PR15448
+
+func:
+ .cfi_startproc
+ .cfi_endproc
+
+// X86-64: R_X86_64_PC32
+// I686: R_386_PC32
diff --git a/test/MC/X86/gnux32-dwarf-gen.s b/test/MC/X86/gnux32-dwarf-gen.s
new file mode 100644
index 0000000000000..6603125343d0e
--- /dev/null
+++ b/test/MC/X86/gnux32-dwarf-gen.s
@@ -0,0 +1,24 @@
+# RUN: llvm-mc -g -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t.64
+# RUN: llvm-dwarfdump -debug-dump=info %t.64 | FileCheck -check-prefix=DEFAULTABI %s
+
+# RUN: llvm-mc -g -filetype=obj -triple x86_64-pc-linux-gnux32 %s -o %t.32
+# RUN: llvm-dwarfdump -debug-dump=info %t.32 | FileCheck -check-prefix=X32ABI %s
+
+# This test checks the DWARF info section emitted to the output object by the
+# assembler, comparing the x32 ABI with the default x86-64 ABI.
+
+# DEFAULTABI: addr_size = 0x08
+# X32ABI: addr_size = 0x04
+
+.globl _bar
+_bar:
+ movl $0, %eax
+L1: leave
+ ret
+_foo:
+_baz:
+ nop
+.data
+_x: .long 1
+
diff --git a/test/MC/X86/intel-syntax-encoding.s b/test/MC/X86/intel-syntax-encoding.s
index 03b05511649aa..9806ac3802e79 100644
--- a/test/MC/X86/intel-syntax-encoding.s
+++ b/test/MC/X86/intel-syntax-encoding.s
@@ -31,6 +31,27 @@
// CHECK: encoding: [0x48,0x83,0xc0,0xf4]
add rax, -12
+// CHECK: encoding: [0x66,0x83,0xd0,0xf4]
+ adc ax, -12
+// CHECK: encoding: [0x83,0xd0,0xf4]
+ adc eax, -12
+// CHECK: encoding: [0x48,0x83,0xd0,0xf4]
+ adc rax, -12
+
+// CHECK: encoding: [0x66,0x83,0xd8,0xf4]
+ sbb ax, -12
+// CHECK: encoding: [0x83,0xd8,0xf4]
+ sbb eax, -12
+// CHECK: encoding: [0x48,0x83,0xd8,0xf4]
+ sbb rax, -12
+
+// CHECK: encoding: [0x66,0x83,0xf8,0xf4]
+ cmp ax, -12
+// CHECK: encoding: [0x83,0xf8,0xf4]
+ cmp eax, -12
+// CHECK: encoding: [0x48,0x83,0xf8,0xf4]
+ cmp rax, -12
+
LBB0_3:
// CHECK: encoding: [0xeb,A]
jmp LBB0_3
diff --git a/test/MC/X86/intel-syntax-hex.s b/test/MC/X86/intel-syntax-hex.s
new file mode 100644
index 0000000000000..b3a19fbaa3450
--- /dev/null
+++ b/test/MC/X86/intel-syntax-hex.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=intel %s | FileCheck %s
+// rdar://12470373
+
+// Checks to make sure we parse the hexadecimal suffix properly.
+// CHECK: movl $10, %eax
+ mov eax, 10
+// CHECK: movl $16, %eax
+ mov eax, 10h
+// CHECK: movl $16, %eax
+ mov eax, 10H
+// CHECK: movl $4294967295, %eax
+ mov eax, 0ffffffffh
+// CHECK: movl $4294967295, %eax
+ mov eax, 0xffffffff
+// CHECK: movl $4294967295, %eax
+ mov eax, 0xffffffffh
+// CHECK: movl $15, %eax
+ mov eax, 0fh
+// CHECK: movl $162, %eax
+ mov eax, 0a2h
+// CHECK: movl $162, %eax
+ mov eax, 0xa2
+// CHECK: movl $162, %eax
+ mov eax, 0xa2h
+// CHECK: movl $674, %eax
+ mov eax, 2a2h
diff --git a/test/MC/X86/intel-syntax.s b/test/MC/X86/intel-syntax.s
index 7edd26a1382f3..8bfa58a4bed86 100644
--- a/test/MC/X86/intel-syntax.s
+++ b/test/MC/X86/intel-syntax.s
@@ -56,13 +56,195 @@ _main:
// CHECK: fld %st(0)
fld ST(0)
// CHECK: movl %fs:(%rdi), %eax
- mov EAX, DWORD PTR FS:[RDI]
-// CHECK: leal (,%rdi,4), %r8d
- lea R8D, DWORD PTR [4*RDI]
-// CHECK: movl _fnan(,%ecx,4), %ecx
- mov ECX, DWORD PTR [4*ECX + _fnan]
-// CHECK: movq %fs:320, %rax
- mov RAX, QWORD PTR FS:[320]
-// CHECK: vpgatherdd %xmm8, (%r15,%xmm9,2), %xmm1
- vpgatherdd XMM10, DWORD PTR [R15 + 2*XMM9], XMM8
+ mov EAX, DWORD PTR FS:[RDI]
+// CHECK: leal (,%rdi,4), %r8d
+ lea R8D, DWORD PTR [4*RDI]
+// CHECK: movl _fnan(,%ecx,4), %ecx
+ mov ECX, DWORD PTR [4*ECX + _fnan]
+// CHECK: movq %fs:320, %rax
+ mov RAX, QWORD PTR FS:[320]
+// CHECK: vpgatherdd %xmm8, (%r15,%xmm9,2), %xmm1
+ vpgatherdd XMM10, DWORD PTR [R15 + 2*XMM9], XMM8
+// CHECK: movsd -8, %xmm5
+ movsd XMM5, QWORD PTR [-8]
+// CHECK: movl %ecx, (%eax)
+ mov [eax], ecx
+// CHECK: movl %ecx, (,%ebx,4)
+ mov [4*ebx], ecx
+ // CHECK: movl %ecx, (,%ebx,4)
+ mov [ebx*4], ecx
+// CHECK: movl %ecx, 1024
+ mov [1024], ecx
+// CHECK: movl %ecx, 4132
+ mov [0x1024], ecx
+// CHECK: movl %ecx, 32
+ mov [16 + 16], ecx
+// CHECK: movl %ecx, 0
+ mov [16 - 16], ecx
+// CHECK: movl %ecx, 32
+ mov [16][16], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [eax + 4*ebx], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [eax + ebx*4], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [4*ebx + eax], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [ebx*4 + eax], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [eax][4*ebx], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [eax][ebx*4], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [4*ebx][eax], ecx
+// CHECK: movl %ecx, (%eax,%ebx,4)
+ mov [ebx*4][eax], ecx
+// CHECK: movl %ecx, 12(%eax)
+ mov [eax + 12], ecx
+// CHECK: movl %ecx, 12(%eax)
+ mov [12 + eax], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [eax + 16 + 16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16 + eax + 16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16 + 16 + eax], ecx
+// CHECK: movl %ecx, 12(%eax)
+ mov [eax][12], ecx
+// CHECK: movl %ecx, 12(%eax)
+ mov [12][eax], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [eax][16 + 16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [eax + 16][16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [eax][16][16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16][eax + 16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16 + eax][16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16][16 + eax], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16 + 16][eax], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [eax][16][16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16][eax][16], ecx
+// CHECK: movl %ecx, 32(%eax)
+ mov [16][16][eax], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [4*ebx + 16], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [ebx*4 + 16], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [4*ebx][16], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [ebx*4][16], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [16 + 4*ebx], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [16 + ebx*4], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [16][4*ebx], ecx
+// CHECK: movl %ecx, 16(,%ebx,4)
+ mov [16][ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + 4*ebx + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + 16 + 4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx + eax + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx + 16 + eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax + 4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax + 4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][4*ebx + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][16 + 4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx][eax + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx][16 + eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax + 4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax + 4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + 4*ebx][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + 16][4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx + eax][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx + 16][eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax][4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax][4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][4*ebx][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][16][4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx][eax][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [4*ebx][16][eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax][4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax][4*ebx], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + ebx*4 + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + 16 + ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4 + eax + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4 + 16 + eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax + ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax + ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][ebx*4 + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][16 + ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4][eax + 16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4][16 + eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax + ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax + ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + ebx*4][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax + 16][ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4 + eax][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4 + 16][eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax][ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16 + eax][ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][ebx*4][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [eax][16][ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4][eax][16], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [ebx*4][16][eax], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax][ebx*4], ecx
+// CHECK: movl %ecx, 16(%eax,%ebx,4)
+ mov [16][eax][ebx*4], ecx
+// CHECK: movl %ecx, -16(%eax,%ebx,4)
+ mov [eax][ebx*4 - 16], ecx
ret
diff --git a/test/MC/X86/lit.local.cfg b/test/MC/X86/lit.local.cfg
index eee568e8fdc2b..ad280c7cf7dec 100644
--- a/test/MC/X86/lit.local.cfg
+++ b/test/MC/X86/lit.local.cfg
@@ -1,12 +1,5 @@
config.suffixes = ['.ll', '.c', '.cpp', '.s']
-def getRoot(config):
- if not config.parent:
- return config
- return getRoot(config.parent)
-
-root = getRoot(config)
-
-targets = set(root.targets_to_build.split())
+targets = set(config.root.targets_to_build.split())
if not 'X86' in targets:
config.unsupported = True
diff --git a/test/MC/X86/shuffle-comments.s b/test/MC/X86/shuffle-comments.s
new file mode 100644
index 0000000000000..20fd4ebae4dc7
--- /dev/null
+++ b/test/MC/X86/shuffle-comments.s
@@ -0,0 +1,271 @@
+# RUN: llvm-mc %s -triple=x86_64-unknown-unknown | FileCheck %s
+
+palignr $8, %xmm0, %xmm1
+# CHECK: xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+palignr $8, (%rax), %xmm1
+# CHECK: xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+
+palignr $16, %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+palignr $16, (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+
+palignr $0, %xmm0, %xmm1
+# CHECK: xmm1 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+palignr $0, (%rax), %xmm1
+# CHECK: xmm1 = mem[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+
+vpalignr $8, %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+vpalignr $8, (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+
+vpalignr $16, %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+vpalignr $16, (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+
+vpalignr $0, %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+vpalignr $0, (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = mem[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+
+vpalignr $8, %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm0[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
+vpalignr $8, (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
+
+vpalignr $16, %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+vpalignr $16, (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+
+vpalignr $0, %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+vpalignr $0, (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = mem[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+
+pshufd $27, %xmm0, %xmm1
+# CHECK: xmm1 = xmm0[3,2,1,0]
+pshufd $27, (%rax), %xmm1
+# CHECK: xmm1 = mem[3,2,1,0]
+
+vpshufd $27, %xmm0, %xmm1
+# CHECK: xmm1 = xmm0[3,2,1,0]
+vpshufd $27, (%rax), %xmm1
+# CHECK: xmm1 = mem[3,2,1,0]
+
+vpshufd $27, %ymm0, %ymm1
+# CHECK: ymm1 = ymm0[3,2,1,0,7,6,5,4]
+vpshufd $27, (%rax), %ymm1
+# CHECK: ymm1 = mem[3,2,1,0,7,6,5,4]
+
+punpcklbw %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+punpcklbw (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+
+vpunpcklbw %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+vpunpcklbw (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+
+vpunpcklbw %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+vpunpcklbw (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[4],mem[4],ymm1[5],mem[5],ymm1[6],mem[6],ymm1[7],mem[7],ymm1[16],mem[16],ymm1[17],mem[17],ymm1[18],mem[18],ymm1[19],mem[19],ymm1[20],mem[20],ymm1[21],mem[21],ymm1[22],mem[22],ymm1[23],mem[23]
+
+punpckhbw %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+punpckhbw (%rax), %xmm1
+# CHECK: xmm1 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15]
+
+vpunpckhbw %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+vpunpckhbw (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[8],mem[8],xmm1[9],mem[9],xmm1[10],mem[10],xmm1[11],mem[11],xmm1[12],mem[12],xmm1[13],mem[13],xmm1[14],mem[14],xmm1[15],mem[15]
+
+vpunpckhbw %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+vpunpckhbw (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11],ymm1[12],mem[12],ymm1[13],mem[13],ymm1[14],mem[14],ymm1[15],mem[15],ymm1[24],mem[24],ymm1[25],mem[25],ymm1[26],mem[26],ymm1[27],mem[27],ymm1[28],mem[28],ymm1[29],mem[29],ymm1[30],mem[30],ymm1[31],mem[31]
+
+punpcklwd %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+punpcklwd (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+
+vpunpcklwd %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+vpunpcklwd (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+
+vpunpcklwd %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+vpunpcklwd (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
+
+punpckhwd %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+punpckhwd (%rax), %xmm1
+# CHECK: xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+
+vpunpckhwd %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+vpunpckhwd (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+
+vpunpckhwd %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+vpunpckhwd (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[4],mem[4],ymm1[5],mem[5],ymm1[6],mem[6],ymm1[7],mem[7],ymm1[12],mem[12],ymm1[13],mem[13],ymm1[14],mem[14],ymm1[15],mem[15]
+
+punpckldq %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+punpckldq (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+
+vpunpckldq %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+vpunpckldq (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],mem[0],xmm1[1],mem[1]
+
+vpunpckldq %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+vpunpckldq (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
+
+punpckhdq %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+punpckhdq (%rax), %xmm1
+# CHECK: xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
+
+vpunpckhdq %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+vpunpckhdq (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[2],mem[2],xmm1[3],mem[3]
+
+vpunpckhdq %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+vpunpckhdq (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+
+punpcklqdq %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0],xmm0[0]
+punpcklqdq (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0],mem[0]
+
+vpunpcklqdq %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],xmm0[0]
+vpunpcklqdq (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],mem[0]
+
+vpunpcklqdq %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+vpunpcklqdq (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],mem[0],ymm1[2],mem[2]
+
+punpckhqdq %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[1],xmm0[1]
+punpckhqdq (%rax), %xmm1
+# CHECK: xmm1 = xmm1[1],mem[1]
+
+vpunpckhqdq %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[1],xmm0[1]
+vpunpckhqdq (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[1],mem[1]
+
+vpunpckhqdq %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+vpunpckhqdq (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[1],mem[1],ymm1[3],mem[3]
+
+unpcklps %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+unpcklps (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+
+vunpcklps %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+vunpcklps (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],mem[0],xmm1[1],mem[1]
+
+vunpcklps %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+vunpcklps (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
+
+unpckhps %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+unpckhps (%rax), %xmm1
+# CHECK: xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
+
+vunpckhps %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+vunpckhps (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[2],mem[2],xmm1[3],mem[3]
+
+vunpckhps %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+vunpckhps (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+
+unpcklpd %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[0],xmm0[0]
+unpcklpd (%rax), %xmm1
+# CHECK: xmm1 = xmm1[0],mem[0]
+
+vunpcklpd %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],xmm0[0]
+vunpcklpd (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[0],mem[0]
+
+vunpcklpd %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+vunpcklpd (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[0],mem[0],ymm1[2],mem[2]
+
+unpckhpd %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[1],xmm0[1]
+unpckhpd (%rax), %xmm1
+# CHECK: xmm1 = xmm1[1],mem[1]
+
+vunpckhpd %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[1],xmm0[1]
+vunpckhpd (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[1],mem[1]
+
+vunpckhpd %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+vunpckhpd (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[1],mem[1],ymm1[3],mem[3]
+
+shufps $27, %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[3,2],xmm0[1,0]
+shufps $27, (%rax), %xmm1
+# CHECK: xmm1 = xmm1[3,2],mem[1,0]
+
+vshufps $27, %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[3,2],xmm0[1,0]
+vshufps $27, (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[3,2],mem[1,0]
+
+vshufps $27, %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[3,2],ymm0[1,0],ymm1[7,6],ymm0[5,4]
+vshufps $27, (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[3,2],mem[1,0],ymm1[7,6],mem[5,4]
+
+shufpd $3, %xmm0, %xmm1
+# CHECK: xmm1 = xmm1[1],xmm0[1]
+shufpd $3, (%rax), %xmm1
+# CHECK: xmm1 = xmm1[1],mem[1]
+
+vshufpd $3, %xmm0, %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[1],xmm0[1]
+vshufpd $3, (%rax), %xmm1, %xmm2
+# CHECK: xmm2 = xmm1[1],mem[1]
+
+vshufpd $11, %ymm0, %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[1],ymm0[1],ymm1[2],ymm0[3]
+vshufpd $11, (%rax), %ymm1, %ymm2
+# CHECK: ymm2 = ymm1[1],mem[1],ymm1[2],mem[3]
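As a quick sketch of where these comments come from: shufps reads four 2-bit index fields from its immediate, the low two selecting elements of the destination register and the high two selecting elements of the source, so $27 = 0b00_01_10_11 decodes, low field first, to 3, 2, 1, 0. Using a line that already appears in the test:

shufps $27, %xmm0, %xmm1   # fields 3,2 pick xmm1[3,2]; fields 1,0 pick xmm0[1,0]
                           # hence the checked comment xmm1 = xmm1[3,2],xmm0[1,0]

In the same spirit, palignr $8 keeps bytes 8-23 of the destination:source byte concatenation (source in the low half), which is why the first comment in this file reads xmm0[8..15] followed by xmm1[0..7].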
diff --git a/test/MC/X86/x86-32-avx.s b/test/MC/X86/x86-32-avx.s
index 586f3fe73c57b..ec4abdbb2a8bf 100644
--- a/test/MC/X86/x86-32-avx.s
+++ b/test/MC/X86/x86-32-avx.s
@@ -655,14 +655,22 @@
// CHECK: encoding: [0xc5,0xfa,0x2c,0x01]
vcvttss2si (%ecx), %eax
-// CHECK: vcvtsi2ss (%eax), %xmm1, %xmm2
+// CHECK: vcvtsi2ssl (%eax), %xmm1, %xmm2
// CHECK: encoding: [0xc5,0xf2,0x2a,0x10]
vcvtsi2ss (%eax), %xmm1, %xmm2
-// CHECK: vcvtsi2ss (%eax), %xmm1, %xmm2
+// CHECK: vcvtsi2ssl (%eax), %xmm1, %xmm2
// CHECK: encoding: [0xc5,0xf2,0x2a,0x10]
vcvtsi2ss (%eax), %xmm1, %xmm2
+// CHECK: vcvtsi2ssl (%eax), %xmm1, %xmm2
+// CHECK: encoding: [0xc5,0xf2,0x2a,0x10]
+ vcvtsi2ssl (%eax), %xmm1, %xmm2
+
+// CHECK: vcvtsi2ssl (%eax), %xmm1, %xmm2
+// CHECK: encoding: [0xc5,0xf2,0x2a,0x10]
+ vcvtsi2ssl (%eax), %xmm1, %xmm2
+
// CHECK: vcvttsd2si %xmm1, %eax
// CHECK: encoding: [0xc5,0xfb,0x2c,0xc1]
vcvttsd2si %xmm1, %eax
@@ -671,14 +679,22 @@
// CHECK: encoding: [0xc5,0xfb,0x2c,0x01]
vcvttsd2si (%ecx), %eax
-// CHECK: vcvtsi2sd (%eax), %xmm1, %xmm2
+// CHECK: vcvtsi2sdl (%eax), %xmm1, %xmm2
// CHECK: encoding: [0xc5,0xf3,0x2a,0x10]
vcvtsi2sd (%eax), %xmm1, %xmm2
-// CHECK: vcvtsi2sd (%eax), %xmm1, %xmm2
+// CHECK: vcvtsi2sdl (%eax), %xmm1, %xmm2
// CHECK: encoding: [0xc5,0xf3,0x2a,0x10]
vcvtsi2sd (%eax), %xmm1, %xmm2
+// CHECK: vcvtsi2sdl (%eax), %xmm1, %xmm2
+// CHECK: encoding: [0xc5,0xf3,0x2a,0x10]
+ vcvtsi2sdl (%eax), %xmm1, %xmm2
+
+// CHECK: vcvtsi2sdl (%eax), %xmm1, %xmm2
+// CHECK: encoding: [0xc5,0xf3,0x2a,0x10]
+ vcvtsi2sdl (%eax), %xmm1, %xmm2
+
// CHECK: vmovaps (%eax), %xmm2
// CHECK: encoding: [0xc5,0xf8,0x28,0x10]
vmovaps (%eax), %xmm2
@@ -767,14 +783,22 @@
// CHECK: encoding: [0xc5,0xe8,0x12,0xd9]
vmovhlps %xmm1, %xmm2, %xmm3
-// CHECK: vcvtss2sil %xmm1, %eax
+// CHECK: vcvtss2si %xmm1, %eax
// CHECK: encoding: [0xc5,0xfa,0x2d,0xc1]
vcvtss2si %xmm1, %eax
-// CHECK: vcvtss2sil (%eax), %ebx
+// CHECK: vcvtss2si (%eax), %ebx
// CHECK: encoding: [0xc5,0xfa,0x2d,0x18]
vcvtss2si (%eax), %ebx
+// CHECK: vcvtss2si %xmm1, %eax
+// CHECK: encoding: [0xc5,0xfa,0x2d,0xc1]
+ vcvtss2sil %xmm1, %eax
+
+// CHECK: vcvtss2si (%eax), %ebx
+// CHECK: encoding: [0xc5,0xfa,0x2d,0x18]
+ vcvtss2sil (%eax), %ebx
+
// CHECK: vcvtdq2ps %xmm5, %xmm6
// CHECK: encoding: [0xc5,0xf8,0x5b,0xf5]
vcvtdq2ps %xmm5, %xmm6
@@ -3103,19 +3127,35 @@
// CHECK: encoding: [0xc5,0xf8,0x77]
vzeroupper
-// CHECK: vcvtsd2sil %xmm4, %ecx
+// CHECK: vcvtsd2si %xmm4, %ecx
// CHECK: encoding: [0xc5,0xfb,0x2d,0xcc]
vcvtsd2sil %xmm4, %ecx
-// CHECK: vcvtsd2sil (%ecx), %ecx
+// CHECK: vcvtsd2si (%ecx), %ecx
// CHECK: encoding: [0xc5,0xfb,0x2d,0x09]
vcvtsd2sil (%ecx), %ecx
-// CHECK: vcvtsi2sd (%ebp), %xmm0, %xmm7
+// CHECK: vcvtsd2si %xmm4, %ecx
+// CHECK: encoding: [0xc5,0xfb,0x2d,0xcc]
+ vcvtsd2si %xmm4, %ecx
+
+// CHECK: vcvtsd2si (%ecx), %ecx
+// CHECK: encoding: [0xc5,0xfb,0x2d,0x09]
+ vcvtsd2si (%ecx), %ecx
+
+// CHECK: vcvtsi2sdl (%ebp), %xmm0, %xmm7
+// CHECK: encoding: [0xc5,0xfb,0x2a,0x7d,0x00]
+ vcvtsi2sdl (%ebp), %xmm0, %xmm7
+
+// CHECK: vcvtsi2sdl (%esp), %xmm0, %xmm7
+// CHECK: encoding: [0xc5,0xfb,0x2a,0x3c,0x24]
+ vcvtsi2sdl (%esp), %xmm0, %xmm7
+
+// CHECK: vcvtsi2sdl (%ebp), %xmm0, %xmm7
// CHECK: encoding: [0xc5,0xfb,0x2a,0x7d,0x00]
vcvtsi2sd (%ebp), %xmm0, %xmm7
-// CHECK: vcvtsi2sd (%esp), %xmm0, %xmm7
+// CHECK: vcvtsi2sdl (%esp), %xmm0, %xmm7
// CHECK: encoding: [0xc5,0xfb,0x2a,0x3c,0x24]
vcvtsi2sd (%esp), %xmm0, %xmm7
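The pattern in this hunk, and in the coverage and 64-bit encoding tests that follow: the vcvtsi2ss/vcvtsi2sd memory forms now print with an explicit l suffix, making the 32-bit integer width visible, while vcvtss2si/vcvtsd2si drop the l suffix, the destination GPR already giving the width. Two forms taken directly from the checks above:

vcvtsi2ssl (%eax), %xmm1, %xmm2    // memory source, dword width spelled out
vcvtss2si  %xmm1, %eax             // register operands, suffix omitted on output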
diff --git a/test/MC/X86/x86-32-coverage.s b/test/MC/X86/x86-32-coverage.s
index 0824916519277..c348915d23ce3 100644
--- a/test/MC/X86/x86-32-coverage.s
+++ b/test/MC/X86/x86-32-coverage.s
@@ -896,11 +896,11 @@
// CHECK: cvtps2pi %xmm5, %mm3
cvtps2pi %xmm5,%mm3
-// CHECK: cvtsi2ss %ecx, %xmm5
- cvtsi2ss %ecx,%xmm5
+// CHECK: cvtsi2ssl %ecx, %xmm5
+ cvtsi2ssl %ecx,%xmm5
-// CHECK: cvtsi2ss 3735928559(%ebx,%ecx,8), %xmm5
- cvtsi2ss 0xdeadbeef(%ebx,%ecx,8),%xmm5
+// CHECK: cvtsi2ssl 3735928559(%ebx,%ecx,8), %xmm5
+ cvtsi2ssl 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: cvttps2pi 3735928559(%ebx,%ecx,8), %mm3
cvttps2pi 0xdeadbeef(%ebx,%ecx,8),%mm3
@@ -1157,11 +1157,11 @@
// CHECK: cvtpi2pd %mm3, %xmm5
cvtpi2pd %mm3,%xmm5
-// CHECK: cvtsi2sd %ecx, %xmm5
- cvtsi2sd %ecx,%xmm5
+// CHECK: cvtsi2sdl %ecx, %xmm5
+ cvtsi2sdl %ecx,%xmm5
-// CHECK: cvtsi2sd 3735928559(%ebx,%ecx,8), %xmm5
- cvtsi2sd 0xdeadbeef(%ebx,%ecx,8),%xmm5
+// CHECK: cvtsi2sdl 3735928559(%ebx,%ecx,8), %xmm5
+ cvtsi2sdl 0xdeadbeef(%ebx,%ecx,8),%xmm5
// CHECK: divpd %xmm5, %xmm5
divpd %xmm5,%xmm5
@@ -3948,6 +3948,10 @@
// CHECK: encoding: [0xd9,0xca]
fxch %st(2)
+// CHECK: fcom
+// CHECK: encoding: [0xd8,0xd1]
+ fcom
+
// CHECK: fcom %st(2)
// CHECK: encoding: [0xd8,0xd2]
fcom %st(2)
@@ -3968,6 +3972,10 @@
// CHECK: encoding: [0xda,0x15,0x78,0x56,0x34,0x12]
ficoml 0x12345678
+// CHECK: fcomp
+// CHECK: encoding: [0xd8,0xd9]
+ fcomp
+
// CHECK: fcomp %st(2)
// CHECK: encoding: [0xd8,0xda]
fcomp %st(2)
@@ -7144,29 +7152,29 @@
// CHECK: encoding: [0x0f,0x2d,0xdd]
cvtps2pi %xmm5,%mm3
-// CHECK: cvtsi2ss %ecx, %xmm5
+// CHECK: cvtsi2ssl %ecx, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0xe9]
- cvtsi2ss %ecx,%xmm5
+ cvtsi2ssl %ecx,%xmm5
-// CHECK: cvtsi2ss 3735928559(%ebx,%ecx,8), %xmm5
+// CHECK: cvtsi2ssl 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
- cvtsi2ss 0xdeadbeef(%ebx,%ecx,8),%xmm5
+ cvtsi2ssl 0xdeadbeef(%ebx,%ecx,8),%xmm5
-// CHECK: cvtsi2ss 69, %xmm5
+// CHECK: cvtsi2ssl 69, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0x45,0x00,0x00,0x00]
- cvtsi2ss 0x45,%xmm5
+ cvtsi2ssl 0x45,%xmm5
-// CHECK: cvtsi2ss 32493, %xmm5
+// CHECK: cvtsi2ssl 32493, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0xed,0x7e,0x00,0x00]
- cvtsi2ss 0x7eed,%xmm5
+ cvtsi2ssl 0x7eed,%xmm5
-// CHECK: cvtsi2ss 3133065982, %xmm5
+// CHECK: cvtsi2ssl 3133065982, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
- cvtsi2ss 0xbabecafe,%xmm5
+ cvtsi2ssl 0xbabecafe,%xmm5
-// CHECK: cvtsi2ss 305419896, %xmm5
+// CHECK: cvtsi2ssl 305419896, %xmm5
// CHECK: encoding: [0xf3,0x0f,0x2a,0x2d,0x78,0x56,0x34,0x12]
- cvtsi2ss 0x12345678,%xmm5
+ cvtsi2ssl 0x12345678,%xmm5
// CHECK: cvttps2pi 3735928559(%ebx,%ecx,8), %mm3
// CHECK: encoding: [0x0f,0x2c,0x9c,0xcb,0xef,0xbe,0xad,0xde]
@@ -8652,29 +8660,29 @@
// CHECK: encoding: [0x66,0x0f,0x2a,0xeb]
cvtpi2pd %mm3,%xmm5
-// CHECK: cvtsi2sd %ecx, %xmm5
+// CHECK: cvtsi2sdl %ecx, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0xe9]
- cvtsi2sd %ecx,%xmm5
+ cvtsi2sdl %ecx,%xmm5
-// CHECK: cvtsi2sd 3735928559(%ebx,%ecx,8), %xmm5
+// CHECK: cvtsi2sdl 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0xac,0xcb,0xef,0xbe,0xad,0xde]
- cvtsi2sd 0xdeadbeef(%ebx,%ecx,8),%xmm5
+ cvtsi2sdl 0xdeadbeef(%ebx,%ecx,8),%xmm5
-// CHECK: cvtsi2sd 69, %xmm5
+// CHECK: cvtsi2sdl 69, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0x45,0x00,0x00,0x00]
- cvtsi2sd 0x45,%xmm5
+ cvtsi2sdl 0x45,%xmm5
-// CHECK: cvtsi2sd 32493, %xmm5
+// CHECK: cvtsi2sdl 32493, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0xed,0x7e,0x00,0x00]
- cvtsi2sd 0x7eed,%xmm5
+ cvtsi2sdl 0x7eed,%xmm5
-// CHECK: cvtsi2sd 3133065982, %xmm5
+// CHECK: cvtsi2sdl 3133065982, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0xfe,0xca,0xbe,0xba]
- cvtsi2sd 0xbabecafe,%xmm5
+ cvtsi2sdl 0xbabecafe,%xmm5
-// CHECK: cvtsi2sd 305419896, %xmm5
+// CHECK: cvtsi2sdl 305419896, %xmm5
// CHECK: encoding: [0xf2,0x0f,0x2a,0x2d,0x78,0x56,0x34,0x12]
- cvtsi2sd 0x12345678,%xmm5
+ cvtsi2sdl 0x12345678,%xmm5
// CHECK: divpd 3735928559(%ebx,%ecx,8), %xmm5
// CHECK: encoding: [0x66,0x0f,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
@@ -16200,23 +16208,23 @@
// CHECK: cvtps2pi %xmm5, %mm3
cvtps2pi %xmm5,%mm3
-// CHECK: cvtsi2ss %ecx, %xmm5
- cvtsi2ss %ecx,%xmm5
+// CHECK: cvtsi2ssl %ecx, %xmm5
+ cvtsi2ssl %ecx,%xmm5
-// CHECK: cvtsi2ss 3735928559(%ebx,%ecx,8), %xmm5
- cvtsi2ss 0xdeadbeef(%ebx,%ecx,8),%xmm5
+// CHECK: cvtsi2ssl 3735928559(%ebx,%ecx,8), %xmm5
+ cvtsi2ssl 0xdeadbeef(%ebx,%ecx,8),%xmm5
-// CHECK: cvtsi2ss 69, %xmm5
- cvtsi2ss 0x45,%xmm5
+// CHECK: cvtsi2ssl 69, %xmm5
+ cvtsi2ssl 0x45,%xmm5
-// CHECK: cvtsi2ss 32493, %xmm5
- cvtsi2ss 0x7eed,%xmm5
+// CHECK: cvtsi2ssl 32493, %xmm5
+ cvtsi2ssl 0x7eed,%xmm5
-// CHECK: cvtsi2ss 3133065982, %xmm5
- cvtsi2ss 0xbabecafe,%xmm5
+// CHECK: cvtsi2ssl 3133065982, %xmm5
+ cvtsi2ssl 0xbabecafe,%xmm5
-// CHECK: cvtsi2ss 305419896, %xmm5
- cvtsi2ss 0x12345678,%xmm5
+// CHECK: cvtsi2ssl 305419896, %xmm5
+ cvtsi2ssl 0x12345678,%xmm5
// CHECK: cvttps2pi 3735928559(%ebx,%ecx,8), %mm3
cvttps2pi 0xdeadbeef(%ebx,%ecx,8),%mm3
@@ -17334,23 +17342,23 @@
// CHECK: cvtpi2pd %mm3, %xmm5
cvtpi2pd %mm3,%xmm5
-// CHECK: cvtsi2sd %ecx, %xmm5
- cvtsi2sd %ecx,%xmm5
+// CHECK: cvtsi2sdl %ecx, %xmm5
+ cvtsi2sdl %ecx,%xmm5
-// CHECK: cvtsi2sd 3735928559(%ebx,%ecx,8), %xmm5
- cvtsi2sd 0xdeadbeef(%ebx,%ecx,8),%xmm5
+// CHECK: cvtsi2sdl 3735928559(%ebx,%ecx,8), %xmm5
+ cvtsi2sdl 0xdeadbeef(%ebx,%ecx,8),%xmm5
-// CHECK: cvtsi2sd 69, %xmm5
- cvtsi2sd 0x45,%xmm5
+// CHECK: cvtsi2sdl 69, %xmm5
+ cvtsi2sdl 0x45,%xmm5
-// CHECK: cvtsi2sd 32493, %xmm5
- cvtsi2sd 0x7eed,%xmm5
+// CHECK: cvtsi2sdl 32493, %xmm5
+ cvtsi2sdl 0x7eed,%xmm5
-// CHECK: cvtsi2sd 3133065982, %xmm5
- cvtsi2sd 0xbabecafe,%xmm5
+// CHECK: cvtsi2sdl 3133065982, %xmm5
+ cvtsi2sdl 0xbabecafe,%xmm5
-// CHECK: cvtsi2sd 305419896, %xmm5
- cvtsi2sd 0x12345678,%xmm5
+// CHECK: cvtsi2sdl 305419896, %xmm5
+ cvtsi2sdl 0x12345678,%xmm5
// CHECK: divpd 3735928559(%ebx,%ecx,8), %xmm5
divpd 0xdeadbeef(%ebx,%ecx,8),%xmm5
diff --git a/test/MC/X86/x86-32-ms-inline-asm.s b/test/MC/X86/x86-32-ms-inline-asm.s
index 73d5878b41bc0..d912915c585e2 100644
--- a/test/MC/X86/x86-32-ms-inline-asm.s
+++ b/test/MC/X86/x86-32-ms-inline-asm.s
@@ -57,4 +57,37 @@ _t21: ## @t21
// CHECK: movl 4(%esi,%eax,2), %eax
// CHECK: # encoding: [0x8b,0x44,0x46,0x04]
+ mov eax, 4[esi + 2*eax + 4]
+// CHECK: movl 8(%esi,%eax,2), %eax
+// CHECK: # encoding: [0x8b,0x44,0x46,0x08]
+ mov eax, 4[esi][2*eax + 4]
+// CHECK: movl 8(%esi,%eax,2), %eax
+// CHECK: # encoding: [0x8b,0x44,0x46,0x08]
+ mov eax, 4[esi + 2*eax][4]
+// CHECK: movl 8(%esi,%eax,2), %eax
+// CHECK: # encoding: [0x8b,0x44,0x46,0x08]
+ mov eax, 4[esi][2*eax][4]
+// CHECK: movl 8(%esi,%eax,2), %eax
+// CHECK: # encoding: [0x8b,0x44,0x46,0x08]
+ mov eax, 4[esi][2*eax][4][8]
+// CHECK: movl 16(%esi,%eax,2), %eax
+// CHECK: # encoding: [0x8b,0x44,0x46,0x10]
+
+ prefetchnta 64[eax]
+// CHECK: prefetchnta 64(%eax)
+// CHECK: # encoding: [0x0f,0x18,0x40,0x40]
+
+ pusha
+// CHECK: pushal
+// CHECK: # encoding: [0x60]
+ popa
+// CHECK: popal
+// CHECK: # encoding: [0x61]
+ pushad
+// CHECK: pushal
+// CHECK: # encoding: [0x60]
+ popad
+// CHECK: popal
+// CHECK: # encoding: [0x61]
+
ret
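The added MS-syntax cases exercise displacement folding: stacked bracket terms keep one base register and one scaled index and simply sum their constant terms, e.g.

 mov eax, 4[esi][2*eax][4][8]   // disp = 4 + 4 + 8 = 16, hence the movl 16(%esi,%eax,2), %eax check above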
diff --git a/test/MC/X86/x86-64.s b/test/MC/X86/x86-64.s
index 03cb62e7cba3e..c5f1d15f8ff0c 100644
--- a/test/MC/X86/x86-64.s
+++ b/test/MC/X86/x86-64.s
@@ -507,15 +507,15 @@ fsave 32493
// rdar://8456382 - cvtsd2si support.
cvtsd2si %xmm1, %rax
-// CHECK: cvtsd2siq %xmm1, %rax
+// CHECK: cvtsd2si %xmm1, %rax
// CHECK: encoding: [0xf2,0x48,0x0f,0x2d,0xc1]
cvtsd2si %xmm1, %eax
-// CHECK: cvtsd2sil %xmm1, %eax
+// CHECK: cvtsd2si %xmm1, %eax
// CHECK: encoding: [0xf2,0x0f,0x2d,0xc1]
-cvtsd2siq %xmm0, %rax // CHECK: cvtsd2siq %xmm0, %rax
-cvtsd2sil %xmm0, %eax // CHECK: cvtsd2sil %xmm0, %eax
-cvtsd2si %xmm0, %rax // CHECK: cvtsd2siq %xmm0, %rax
+cvtsd2siq %xmm0, %rax // CHECK: cvtsd2si %xmm0, %rax
+cvtsd2sil %xmm0, %eax // CHECK: cvtsd2si %xmm0, %eax
+cvtsd2si %xmm0, %rax // CHECK: cvtsd2si %xmm0, %rax
cvttpd2dq %xmm1, %xmm0 // CHECK: cvttpd2dq %xmm1, %xmm0
diff --git a/test/MC/X86/x86_64-avx-encoding.s b/test/MC/X86/x86_64-avx-encoding.s
index 46ff9ead39bfc..6da9e21fef667 100644
--- a/test/MC/X86/x86_64-avx-encoding.s
+++ b/test/MC/X86/x86_64-avx-encoding.s
@@ -1404,25 +1404,25 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc5,0xfa,0x2c,0x01]
vcvttss2si (%rcx), %eax
-// CHECK: vcvtsi2ss (%rax), %xmm11, %xmm12
+// CHECK: vcvtsi2ssl (%rax), %xmm11, %xmm12
// CHECK: encoding: [0xc5,0x22,0x2a,0x20]
- vcvtsi2ss (%rax), %xmm11, %xmm12
+ vcvtsi2ssl (%rax), %xmm11, %xmm12
-// CHECK: vcvtsi2ss (%rax), %xmm11, %xmm12
+// CHECK: vcvtsi2ssl (%rax), %xmm11, %xmm12
// CHECK: encoding: [0xc5,0x22,0x2a,0x20]
- vcvtsi2ss (%rax), %xmm11, %xmm12
+ vcvtsi2ssl (%rax), %xmm11, %xmm12
// CHECK: vcvttsd2si (%rcx), %eax
// CHECK: encoding: [0xc5,0xfb,0x2c,0x01]
vcvttsd2si (%rcx), %eax
-// CHECK: vcvtsi2sd (%rax), %xmm11, %xmm12
+// CHECK: vcvtsi2sdl (%rax), %xmm11, %xmm12
// CHECK: encoding: [0xc5,0x23,0x2a,0x20]
- vcvtsi2sd (%rax), %xmm11, %xmm12
+ vcvtsi2sdl (%rax), %xmm11, %xmm12
-// CHECK: vcvtsi2sd (%rax), %xmm11, %xmm12
+// CHECK: vcvtsi2sdl (%rax), %xmm11, %xmm12
// CHECK: encoding: [0xc5,0x23,0x2a,0x20]
- vcvtsi2sd (%rax), %xmm11, %xmm12
+ vcvtsi2sdl (%rax), %xmm11, %xmm12
// CHECK: vmovaps (%rax), %xmm12
// CHECK: encoding: [0xc5,0x78,0x28,0x20]
@@ -1512,11 +1512,11 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc4,0x41,0x18,0x12,0xeb]
vmovhlps %xmm11, %xmm12, %xmm13
-// CHECK: vcvtss2sil %xmm11, %eax
+// CHECK: vcvtss2si %xmm11, %eax
// CHECK: encoding: [0xc4,0xc1,0x7a,0x2d,0xc3]
vcvtss2si %xmm11, %eax
-// CHECK: vcvtss2sil (%rax), %ebx
+// CHECK: vcvtss2si (%rax), %ebx
// CHECK: encoding: [0xc5,0xfa,0x2d,0x18]
vcvtss2si (%rax), %ebx
@@ -3860,29 +3860,29 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc4,0x63,0x2d,0x06,0x18,0x07]
vperm2f128 $7, (%rax), %ymm10, %ymm11
-// CHECK: vcvtsd2sil %xmm8, %r8d
+// CHECK: vcvtsd2si %xmm8, %r8d
// CHECK: encoding: [0xc4,0x41,0x7b,0x2d,0xc0]
- vcvtsd2sil %xmm8, %r8d
+ vcvtsd2si %xmm8, %r8d
-// CHECK: vcvtsd2sil (%rcx), %ecx
+// CHECK: vcvtsd2si (%rcx), %ecx
// CHECK: encoding: [0xc5,0xfb,0x2d,0x09]
- vcvtsd2sil (%rcx), %ecx
+ vcvtsd2si (%rcx), %ecx
-// CHECK: vcvtss2siq %xmm4, %rcx
+// CHECK: vcvtss2si %xmm4, %rcx
// CHECK: encoding: [0xc4,0xe1,0xfa,0x2d,0xcc]
- vcvtss2siq %xmm4, %rcx
+ vcvtss2si %xmm4, %rcx
-// CHECK: vcvtss2siq (%rcx), %r8
+// CHECK: vcvtss2si (%rcx), %r8
// CHECK: encoding: [0xc4,0x61,0xfa,0x2d,0x01]
- vcvtss2siq (%rcx), %r8
+ vcvtss2si (%rcx), %r8
-// CHECK: vcvtsi2sd %r8d, %xmm8, %xmm15
+// CHECK: vcvtsi2sdl %r8d, %xmm8, %xmm15
// CHECK: encoding: [0xc4,0x41,0x3b,0x2a,0xf8]
- vcvtsi2sd %r8d, %xmm8, %xmm15
+ vcvtsi2sdl %r8d, %xmm8, %xmm15
-// CHECK: vcvtsi2sd (%rbp), %xmm8, %xmm15
+// CHECK: vcvtsi2sdl (%rbp), %xmm8, %xmm15
// CHECK: encoding: [0xc5,0x3b,0x2a,0x7d,0x00]
- vcvtsi2sd (%rbp), %xmm8, %xmm15
+ vcvtsi2sdl (%rbp), %xmm8, %xmm15
// CHECK: vcvtsi2sdq %rcx, %xmm4, %xmm6
// CHECK: encoding: [0xc4,0xe1,0xdb,0x2a,0xf1]
@@ -3900,21 +3900,21 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc4,0xe1,0xda,0x2a,0x31]
vcvtsi2ssq (%rcx), %xmm4, %xmm6
-// CHECK: vcvttsd2siq %xmm4, %rcx
+// CHECK: vcvttsd2si %xmm4, %rcx
// CHECK: encoding: [0xc4,0xe1,0xfb,0x2c,0xcc]
- vcvttsd2siq %xmm4, %rcx
+ vcvttsd2si %xmm4, %rcx
-// CHECK: vcvttsd2siq (%rcx), %rcx
+// CHECK: vcvttsd2si (%rcx), %rcx
// CHECK: encoding: [0xc4,0xe1,0xfb,0x2c,0x09]
- vcvttsd2siq (%rcx), %rcx
+ vcvttsd2si (%rcx), %rcx
-// CHECK: vcvttss2siq %xmm4, %rcx
+// CHECK: vcvttss2si %xmm4, %rcx
// CHECK: encoding: [0xc4,0xe1,0xfa,0x2c,0xcc]
- vcvttss2siq %xmm4, %rcx
+ vcvttss2si %xmm4, %rcx
-// CHECK: vcvttss2siq (%rcx), %rcx
+// CHECK: vcvttss2si (%rcx), %rcx
// CHECK: encoding: [0xc4,0xe1,0xfa,0x2c,0x09]
- vcvttss2siq (%rcx), %rcx
+ vcvttss2si (%rcx), %rcx
// CHECK: vlddqu (%rax), %ymm12
// CHECK: encoding: [0xc5,0x7f,0xf0,0x20]
diff --git a/test/MC/X86/x86_64-fma4-encoding.s b/test/MC/X86/x86_64-fma4-encoding.s
index 805fc23cf4cf9..c9bd954e90496 100644
--- a/test/MC/X86/x86_64-fma4-encoding.s
+++ b/test/MC/X86/x86_64-fma4-encoding.s
@@ -25,6 +25,10 @@
// CHECK: encoding: [0xc4,0xe3,0xf9,0x6b,0xc2,0x10]
vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+// CHECK: vfmaddsd %xmm10, %xmm1, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xc3,0xf9,0x6b,0xc2,0x10]
+ vfmaddsd %xmm10, %xmm1, %xmm0, %xmm0
+
// CHECK: vfmaddps (%rcx), %xmm1, %xmm0, %xmm0
// CHECK: encoding: [0xc4,0xe3,0xf9,0x68,0x01,0x10]
vfmaddps (%rcx), %xmm1, %xmm0, %xmm0
@@ -73,6 +77,67 @@
// CHECK: encoding: [0xc4,0xe3,0xfd,0x69,0xc2,0x10]
vfmaddpd %ymm2, %ymm1, %ymm0, %ymm0
+// PR15040
+// CHECK: vfmaddss foo(%rip), %xmm1, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0xf9,0x6a,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddss foo(%rip), %xmm1, %xmm0, %xmm0
+
+// CHECK: vfmaddss %xmm1, foo(%rip), %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0x79,0x6a,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddss %xmm1, foo(%rip),%xmm0, %xmm0
+
+// CHECK: vfmaddsd foo(%rip), %xmm1, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0xf9,0x6b,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddsd foo(%rip), %xmm1, %xmm0, %xmm0
+
+// CHECK: vfmaddsd %xmm1, foo(%rip), %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0x79,0x6b,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddsd %xmm1, foo(%rip),%xmm0, %xmm0
+
+// CHECK: vfmaddps foo(%rip), %xmm1, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0xf9,0x68,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddps foo(%rip), %xmm1, %xmm0, %xmm0
+
+// CHECK: vfmaddps %xmm1, foo(%rip), %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0x79,0x68,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddps %xmm1, foo(%rip),%xmm0, %xmm0
+
+// CHECK: vfmaddpd foo(%rip), %xmm1, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0xf9,0x69,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddpd foo(%rip), %xmm1, %xmm0, %xmm0
+
+// CHECK: vfmaddpd %xmm1, foo(%rip), %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xe3,0x79,0x69,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddpd %xmm1, foo(%rip),%xmm0, %xmm0
+
+// CHECK: vfmaddps foo(%rip), %ymm1, %ymm0, %ymm0
+// CHECK: encoding: [0xc4,0xe3,0xfd,0x68,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddps foo(%rip), %ymm1, %ymm0, %ymm0
+
+// CHECK: vfmaddps %ymm1, foo(%rip), %ymm0, %ymm0
+// CHECK: encoding: [0xc4,0xe3,0x7d,0x68,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddps %ymm1, foo(%rip),%ymm0, %ymm0
+
+// CHECK: vfmaddpd foo(%rip), %ymm1, %ymm0, %ymm0
+// CHECK: encoding: [0xc4,0xe3,0xfd,0x69,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddpd foo(%rip), %ymm1, %ymm0, %ymm0
+
+// CHECK: vfmaddpd %ymm1, foo(%rip), %ymm0, %ymm0
+// CHECK: encoding: [0xc4,0xe3,0x7d,0x69,0x05,A,A,A,A,0x10]
+// CHECK: fixup A - offset: 5, value: foo-5, kind: reloc_riprel_4byte
+ vfmaddpd %ymm1, foo(%rip),%ymm0, %ymm0
+
// vfmsub
// CHECK: vfmsubss (%rcx), %xmm1, %xmm0, %xmm0
// CHECK: encoding: [0xc4,0xe3,0xf9,0x6e,0x01,0x10]
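A note on the PR15040 fixups above: the expected value is foo-5 rather than the more common foo-4 because RIP-relative displacements are measured from the end of the instruction, and these FMA4 forms carry one extra immediate byte (the trailing 0x10 encoding the register operand) after the 4-byte displacement, so 4 + 1 = 5 bytes follow the fixup field.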
diff --git a/test/MC/X86/x86_64-rand-encoding.s b/test/MC/X86/x86_64-rand-encoding.s
new file mode 100644
index 0000000000000..3a8cb817bc1a2
--- /dev/null
+++ b/test/MC/X86/x86_64-rand-encoding.s
@@ -0,0 +1,49 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
+
+// CHECK: rdrandw %ax
+// CHECK: encoding: [0x66,0x0f,0xc7,0xf0]
+ rdrand %ax
+
+// CHECK: rdrandl %eax
+// CHECK: encoding: [0x0f,0xc7,0xf0]
+ rdrand %eax
+
+// CHECK: rdrandq %rax
+// CHECK: encoding: [0x48,0x0f,0xc7,0xf0]
+ rdrand %rax
+
+// CHECK: rdrandw %r11w
+// CHECK: encoding: [0x66,0x41,0x0f,0xc7,0xf3]
+ rdrand %r11w
+
+// CHECK: rdrandl %r11d
+// CHECK: encoding: [0x41,0x0f,0xc7,0xf3]
+ rdrand %r11d
+
+// CHECK: rdrandq %r11
+// CHECK: encoding: [0x49,0x0f,0xc7,0xf3]
+ rdrand %r11
+
+// CHECK: rdseedw %ax
+// CHECK: encoding: [0x66,0x0f,0xc7,0xf8]
+ rdseed %ax
+
+// CHECK: rdseedl %eax
+// CHECK: encoding: [0x0f,0xc7,0xf8]
+ rdseed %eax
+
+// CHECK: rdseedq %rax
+// CHECK: encoding: [0x48,0x0f,0xc7,0xf8]
+ rdseed %rax
+
+// CHECK: rdseedw %r11w
+// CHECK: encoding: [0x66,0x41,0x0f,0xc7,0xfb]
+ rdseed %r11w
+
+// CHECK: rdseedl %r11d
+// CHECK: encoding: [0x41,0x0f,0xc7,0xfb]
+ rdseed %r11d
+
+// CHECK: rdseedq %r11
+// CHECK: encoding: [0x49,0x0f,0xc7,0xfb]
+ rdseed %r11
diff --git a/test/MC/X86/x86_64-rtm-encoding.s b/test/MC/X86/x86_64-rtm-encoding.s
index 44d6bacb7f32f..d9975d67b3149 100644
--- a/test/MC/X86/x86_64-rtm-encoding.s
+++ b/test/MC/X86/x86_64-rtm-encoding.s
@@ -8,6 +8,10 @@
// CHECK: encoding: [0x0f,0x01,0xd5]
xend
+// CHECK: xtest
+// CHECK: encoding: [0x0f,0x01,0xd6]
+ xtest
+
// CHECK: xabort
// CHECK: encoding: [0xc6,0xf8,0x0d]
xabort $13
diff --git a/test/MC/X86/x86_errors.s b/test/MC/X86/x86_errors.s
index f161e06cb580d..6e14d62fda4c9 100644
--- a/test/MC/X86/x86_errors.s
+++ b/test/MC/X86/x86_errors.s
@@ -18,7 +18,7 @@ addl $0, 0(%rax)
movl 0(%rax), 0(%edx) // error: invalid operand for instruction
-// 32: error: instruction requires a CPU feature not currently enabled
+// 32: error: instruction requires: 64-bit mode
sysexitq
// rdar://10710167
diff --git a/test/MC/X86/x86_long_nop.s b/test/MC/X86/x86_long_nop.s
new file mode 100644
index 0000000000000..ac1bc08ff38ba
--- /dev/null
+++ b/test/MC/X86/x86_long_nop.s
@@ -0,0 +1,15 @@
+# RUN: llvm-mc -filetype=obj -arch=x86 -triple=x86_64-pc-linux-gnu %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
+# RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
+# RUN: llvm-mc -filetype=obj -arch=x86 -triple=x86_64-apple-darwin10.0 %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
+# RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-apple-darwin8 %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
+
+# Ensure alignment directives also emit sequences of 15-byte NOPs on processors
+# capable of using long NOPs.
+inc %eax
+.p2align 5
+inc %eax
+# CHECK: 0: inc
+# CHECK-NEXT: 1: nop
+# CHECK-NEXT: 10: nop
+# CHECK-NEXT: 1f: nop
+# CHECK-NEXT: 20: inc
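The offsets checked above line up for the 32-bit triples, where inc %eax is a single byte: .p2align 5 pads from offset 0x1 to 0x20, i.e. 31 bytes, which the long-NOP emitter covers as 15 + 15 + 1, so NOPs land at 0x1, 0x10 and 0x1f before the second inc at 0x20.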